author     Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 15:20:30 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 15:20:30 -1000
commit     43468456c95b9e13c0592de51a9a530caa525c1f (patch)
tree       f5e26b08837c4a10f9979469d5ed85ce9f42b2a5 /drivers/infiniband/core/nldev.c
parent     6ed92e559a2ea572ae2bac5cbeddd1dc8cb667b6 (diff)
parent     2ef422f063b74adcc4a4a9004b0a87bb55e0a836 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "Nothing exciting this cycle, most of the diffstat is changing SPDX
  'or' to 'OR'.

  Summary:

   - Bugfixes for hns, mlx5, and hfi1

   - Hardening patches for size_*, counted_by, strscpy

   - rts fixes from static analysis

   - Dump SRQ objects in rdma netlink, with hns support

   - Fix a performance regression in mlx5 MR deregistration

   - New XDR (200Gb/lane) link speed

   - SRQ record doorbell latency optimization for hns

   - IPSEC support for mlx5 multi-port mode

   - ibv_rereg_mr() support for irdma

   - Affiliated event support for bnxt_re

   - Opt out for the spec compliant qkey security enforcement as we
     discovered SW that breaks under enforcement

   - Comment and trivial updates"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (50 commits)
  IB/mlx5: Fix init stage error handling to avoid double free of same QP and UAF
  RDMA/mlx5: Fix mkey cache WQ flush
  RDMA/hfi1: Workaround truncation compilation error
  IB/hfi1: Fix potential deadlock on &irq_src_lock and &dd->uctxt_lock
  RDMA/core: Remove NULL check before dev_{put, hold}
  RDMA/hfi1: Remove redundant assignment to pointer ppd
  RDMA/mlx5: Change the key being sent for MPV device affiliation
  RDMA/bnxt_re: Fix clang -Wimplicit-fallthrough in bnxt_re_handle_cq_async_error()
  RDMA/hns: Fix init failure of RoCE VF and HIP08
  RDMA/hns: Fix unnecessary port_num transition in HW stats allocation
  RDMA/hns: The UD mode can only be configured with DCQCN
  RDMA/hns: Add check for SL
  RDMA/hns: Fix signed-unsigned mixed comparisons
  RDMA/hns: Fix uninitialized ucmd in hns_roce_create_qp_common()
  RDMA/hns: Fix printing level of asynchronous events
  RDMA/core: Add support to set privileged QKEY parameter
  RDMA/bnxt_re: Do not report SRQ error in srq notification
  RDMA/bnxt_re: Report async events and errors
  RDMA/bnxt_re: Update HW interface headers
  IB/mlx5: Fix rdma counter binding for RAW QP
  ...
Diffstat (limited to 'drivers/infiniband/core/nldev.c')
-rw-r--r--  drivers/infiniband/core/nldev.c  92
1 file changed, 81 insertions, 11 deletions
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 6d1dbc978759..4900a0848124 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -43,6 +43,13 @@
#include "restrack.h"
#include "uverbs.h"
+/*
+ * This determines whether a non-privileged user is allowed to specify a
+ * controlled QKEY or not. When true, a non-privileged user is allowed to
+ * specify a controlled QKEY.
+ */
+static bool privileged_qkey;
+
typedef int (*res_fill_func_t)(struct sk_buff*, bool,
struct rdma_restrack_entry*, uint32_t);
@@ -156,6 +163,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
+ [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -237,6 +245,12 @@ int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
+bool rdma_nl_get_privileged_qkey(void)
+{
+ return privileged_qkey || capable(CAP_NET_RAW);
+}
+EXPORT_SYMBOL(rdma_nl_get_privileged_qkey);
+
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
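The exported helper above is the whole opt-out: a controlled-QKEY request passes either because the caller already has CAP_NET_RAW or because the administrator flipped the global knob. A minimal kernel-side sketch of how a consumer might use it when validating a QP-modify request follows; the check_privileged_qkey() helper and the explicit 0x80000000 test are illustrative assumptions, not code from this series (per the IBTA spec, a QKEY with the most significant bit set is a controlled QKEY). The real enforcement lives in the verbs QP-modify path, not in nldev.c itself.

/*
 * Illustrative kernel-side sketch only: a controlled QKEY has the IBTA
 * "controlled" bit (the most significant bit) set.  Under the default
 * policy only a privileged caller may request one; the new
 * privileged_qkey knob relaxes that for everyone.
 */
#define CONTROLLED_QKEY_BIT 0x80000000U

static int check_privileged_qkey(u32 qkey)	/* hypothetical helper */
{
	if (!(qkey & CONTROLLED_QKEY_BIT))
		return 0;			/* unrestricted QKEY */

	return rdma_nl_get_privileged_qkey() ? 0 : -EPERM;
}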
@@ -357,8 +371,7 @@ static int fill_port_info(struct sk_buff *msg,
}
out:
- if (netdev)
- dev_put(netdev);
+ dev_put(netdev);
return ret;
}
@@ -818,6 +831,7 @@ static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
struct ib_srq *srq = container_of(res, struct ib_srq, res);
+ struct ib_device *dev = srq->device;
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
goto err;
@@ -837,12 +851,29 @@ static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
if (fill_res_srq_qps(msg, srq))
goto err;
- return fill_res_name_pid(msg, res);
+ if (fill_res_name_pid(msg, res))
+ goto err;
+
+ if (dev->ops.fill_res_srq_entry)
+ return dev->ops.fill_res_srq_entry(msg, srq);
+
+ return 0;
err:
return -EMSGSIZE;
}
+static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_srq *srq = container_of(res, struct ib_srq, res);
+ struct ib_device *dev = srq->device;
+
+ if (!dev->ops.fill_res_srq_entry_raw)
+ return -EINVAL;
+ return dev->ops.fill_res_srq_entry_raw(msg, srq);
+}
+
static int fill_stat_counter_mode(struct sk_buff *msg,
struct rdma_counter *counter)
{
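fill_res_srq_entry() and fill_res_srq_raw_entry() only dispatch into the driver; whatever device-specific state ends up in the dump comes from dev->ops.fill_res_srq_entry / fill_res_srq_entry_raw. A rough sketch of what such a driver-side hook could look like is below, nesting its fields under RDMA_NLDEV_ATTR_DRIVER with the rdma_nl_put_driver_* helpers exported from this file; the my_srq structure, to_my_srq(), and the attribute names are made-up placeholders, not part of this patch.

/* Hypothetical driver hook, wired up through struct ib_device_ops. */
static int my_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct my_srq *srq = to_my_srq(ib_srq);	/* driver-private SRQ */
	struct nlattr *table;

	table = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table)
		return -EMSGSIZE;

	/* Driver-specific fields shown to "rdma res show srq" consumers. */
	if (rdma_nl_put_driver_u32(msg, "srq_depth", srq->depth))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "db_reg", srq->db_reg))
		goto err;

	nla_nest_end(msg, table);
	return 0;

err:
	nla_nest_cancel(msg, table);
	return -EMSGSIZE;
}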
@@ -1652,6 +1683,7 @@ RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
+RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ);
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
@@ -1882,6 +1914,12 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
+ err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE,
+ (u8)privileged_qkey);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
/*
* Copy-on-fork is supported.
* See commits:
@@ -1898,18 +1936,11 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}
-static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+static int nldev_set_sys_set_netns_doit(struct nlattr *tb[])
{
- struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
u8 enable;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
- if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
- return -EINVAL;
-
enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
/* Only 0 and 1 are supported */
if (enable > 1)
@@ -1919,6 +1950,40 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
+static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[])
+{
+ u8 enable;
+
+ enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]);
+ /* Only 0 and 1 are supported */
+ if (enable > 1)
+ return -EINVAL;
+
+ privileged_qkey = enable;
+ return 0;
+}
+
+static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
+ return nldev_set_sys_set_netns_doit(tb);
+
+ if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE])
+ return nldev_set_sys_set_pqkey_doit(tb);
+
+ return -EINVAL;
+}
+
+
static int nldev_stat_set_mode_doit(struct sk_buff *msg,
struct netlink_ext_ack *extack,
struct nlattr *tb[],
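With the doit split above, both knobs ride the same RDMA_NLDEV_CMD_SYS_SET message and differ only in which attribute is present. A rough userspace sketch using libnl-3 follows (error handling trimmed, and the set_privileged_qkey() wrapper is hypothetical); it assumes the uapi names from this series are visible in <rdma/rdma_netlink.h>. In practice the iproute2 rdma utility is the usual front end for these nldev messages.

/* Rough libnl-3 sketch; the op is registered with RDMA_NL_ADMIN_PERM, so
 * it typically needs CAP_NET_ADMIN. */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

static int set_privileged_qkey(int enable)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int ret;

	if (!sk || nl_connect(sk, NETLINK_RDMA) < 0)
		return -1;

	msg = nlmsg_alloc();
	if (!msg)
		return -1;

	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_SET),
		  0, NLM_F_REQUEST | NLM_F_ACK);
	nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, !!enable);

	ret = nl_send_auto(sk, msg);	/* kernel replies with an ACK */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return ret < 0 ? -1 : 0;
}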
@@ -2558,6 +2623,11 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_mr_raw_dumpit,
.flags = RDMA_NL_ADMIN_PERM,
},
+ [RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
+ .doit = nldev_res_get_srq_raw_doit,
+ .dump = nldev_res_get_srq_raw_dumpit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
.doit = nldev_stat_get_counter_status_doit,
},