// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"

/*
 * UVERBS_MODULE_NAME must be defined before including uverbs_named_ioctl.h;
 * the DECLARE_UVERBS_* macros below paste it into the symbols they emit.
 */
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

/*
 * Packet pacing (PP) objects are exposed only when the firmware reports the
 * generic QoS capability together with packet pacing and per-UID packet
 * pacing support.
 */
static bool pp_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return (MLX5_CAP_GEN(dev->mdev, qos) &&
		MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
		MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_pp *pp_entry;
	void *in_ctx;
	u16 uid;
	int inlen;
	u32 flags;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* The allocated entry can be used only by a DEVX context */
	if (!c->devx_uid)
		return -EINVAL;

	dev = to_mdev(c->ibucontext.device);
	pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
	if (!pp_entry)
		return -ENOMEM;

	/*
	 * The user-supplied context may be shorter than a full
	 * set_pp_rate_limit_context; copy it into the zero-initialized
	 * buffer so any trailing fields stay zero.
	 */
	in_ctx = uverbs_attr_get_alloced_ptr(attrs,
					     MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	memcpy(rl_raw, in_ctx, inlen);
	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
		MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (err)
		goto err;

	/*
	 * A dedicated index is charged to the caller's DEVX UID; otherwise
	 * the entry comes from (and may be shared with) the global pool.
	 */
	uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
		c->devx_uid : MLX5_SHARED_RESOURCE_UID;

	err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
			(flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
			&pp_entry->index);
	if (err)
		goto err;

	pp_entry->mdev = dev->mdev;
	uobj->object = pp_entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);

	/*
	 * From this point the uobject owns pp_entry; if the copy below
	 * fails, the core unwinds through pp_obj_cleanup() rather than the
	 * local error label, so pp_entry must not be freed here.
	 */
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			     &pp_entry->index, sizeof(pp_entry->index));
	return err;

err:
	kfree(pp_entry);
	return err;
}
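
/*
 * For illustration only (not part of this file's logic): one plausible
 * userspace caller of this method, assuming rdma-core's mlx5dv packet
 * pacing helpers (mlx5dv_pp_alloc()/mlx5dv_pp_free()) as the wrappers for
 * MLX5_IB_METHOD_PP_OBJ_ALLOC/DESTROY.  The device must be opened as a
 * DEVX context (matching the c->devx_uid check above), and rate_ctx is a
 * caller-built set_pp_rate_limit_context blob in device format:
 *
 *	struct mlx5dv_context_attr dv_attr = {
 *		.flags = MLX5DV_CONTEXT_FLAGS_DEVX,
 *	};
 *	struct ibv_context *ctx = mlx5dv_open_device(ibdev, &dv_attr);
 *	struct mlx5dv_pp *pp;
 *
 *	pp = mlx5dv_pp_alloc(ctx, sizeof(rate_ctx), rate_ctx,
 *			     MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
 *	if (pp) {
 *		... program pp->index into a DEVX-created SQ ...
 *		mlx5dv_pp_free(pp);
 *	}
 */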

/*
 * Runs for an explicit MLX5_IB_METHOD_PP_OBJ_DESTROY call as well as when
 * the owning uobject is cleaned up (e.g. on context teardown); it releases
 * the rate-limit index back to the mlx5 core and frees the entry.
 */
static int pp_obj_cleanup(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_pp *pp_entry = uobject->object;

	mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
	kfree(pp_entry);
	return 0;
}

/*
 * The context attribute accepts anywhere from a single byte up to a full
 * set_pp_rate_limit_context; UA_ALLOC_AND_COPY makes the core copy it into
 * kernel memory before the handler runs.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PP_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
		UVERBS_ATTR_SIZE(1,
			MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
			enum mlx5_ib_uapi_pp_alloc_flags,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			   UVERBS_ATTR_TYPE(u16),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_PP_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
			    UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));

/* Exposed to the uverbs core only where pp_is_supported() returns true. */
const struct uapi_definition mlx5_ib_qos_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_PP,
		UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
	{},
};
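
/*
 * mlx5_ib_qos_defs is not registered here by itself; it is meant to be
 * chained into the driver's top-level uapi definition list (a sketch,
 * assuming the usual UAPI_DEF_CHAIN() pattern used for the other mlx5_ib
 * trees):
 *
 *	static const struct uapi_definition mlx5_ib_defs[] = {
 *		UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
 *		...
 *		{},
 *	};
 *
 * With UAPI_DEF_IS_OBJ_SUPPORTED(), the whole MLX5_IB_OBJECT_PP tree is
 * simply left out of the device's ioctl uAPI when pp_is_supported() returns
 * false, rather than failing at call time.
 */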