// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "dr_types.h"
#include "mlx5_ifc_dr_ste_v1.h"

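/* Modify-header action type IDs, as read from the action_id field of a
 * ste_double_action_set_v1 action.
 */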
enum dr_ptrn_modify_hdr_action_id {
	DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
	DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
	DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
	DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
	DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
};

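/* Per-domain modify-header pattern manager: owns the ICM pool that backs
 * the patterns and a cache of previously created patterns.
 */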
struct mlx5dr_ptrn_mgr {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_icm_pool *ptrn_icm_pool;
	/* cache for modify_header ptrn */
	struct list_head ptrn_list;
	struct mutex modify_hdr_mutex; /* protect the pattern cache */
};

/* Cache structure and functions */
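/* Compare two modify-header action arrays. COPY actions are compared in
 * full, while for other action types only part of the action is compared,
 * since their inline data is masked out of the cached pattern (see
 * mlx5dr_ptrn_cache_get_pattern()).
 */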
static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
				       __be64 cur_hw_actions[],
				       size_t num_of_actions,
				       __be64 hw_actions[])
{
	int i;

	if (cur_num_of_actions != num_of_actions)
		return false;

	for (i = 0; i < num_of_actions; i++) {
		u8 action_id =
			MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);

		if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
			if (hw_actions[i] != cur_hw_actions[i])
				return false;
		} else {
			if ((__force __be32)hw_actions[i] !=
			    (__force __be32)cur_hw_actions[i])
				return false;
		}
	}

	return true;
}

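/* Look up a cached pattern matching the given HW actions. On a hit the
 * pattern is moved to the head of the list so that frequently used
 * patterns are found faster on subsequent lookups.
 */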
static struct mlx5dr_ptrn_obj *
dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
			    size_t num_of_actions,
			    __be64 hw_actions[])
{
	struct mlx5dr_ptrn_obj *cached_pattern;
	struct mlx5dr_ptrn_obj *tmp;

	list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
		if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
					       (__be64 *)cached_pattern->data,
					       num_of_actions,
					       hw_actions)) {
			/* Put this pattern at the head of the list,
			 * as it is likely to be used again.
			 */
			list_del_init(&cached_pattern->list);
			list_add(&cached_pattern->list, &mgr->ptrn_list);
			return cached_pattern;
		}
	}

	return NULL;
}

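/* Allocate a new pattern: reserve an ICM chunk large enough for
 * num_of_actions (at least 64B, the HW index granularity), compute the
 * pattern index relative to the pattern ICM base address, copy the
 * actions and add the pattern to the cache with a refcount of 1.
 */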
static struct mlx5dr_ptrn_obj *
dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
		      u16 num_of_actions, u8 *data)
{
	struct mlx5dr_ptrn_obj *pattern;
	struct mlx5dr_icm_chunk *chunk;
	u32 chunk_size;
	u32 index;

	chunk_size = ilog2(roundup_pow_of_two(num_of_actions));
	/* HW modify action index granularity is at least 64B */
	chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);

	chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
	if (!chunk)
		return NULL;

	index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
		 mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
		DR_ACTION_CACHE_LINE_SIZE;

	pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
	if (!pattern)
		goto free_chunk;

	pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
				sizeof(*pattern->data), GFP_KERNEL);
	if (!pattern->data)
		goto free_pattern;

	memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
	pattern->chunk = chunk;
	pattern->index = index;
	pattern->num_of_actions = num_of_actions;

	list_add(&pattern->list, &mgr->ptrn_list);
	refcount_set(&pattern->refcount, 1);

	return pattern;

free_pattern:
	kfree(pattern);
free_chunk:
	mlx5dr_icm_free_chunk(chunk);
	return NULL;
}

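/* Remove a pattern from the cache and release its ICM chunk and memory.
 * Callers hold modify_hdr_mutex.
 */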
static void
dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
{
	list_del(&pattern->list);
	mlx5dr_icm_free_chunk(pattern->chunk);
	kfree(pattern->data);
	kfree(pattern);
}

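/* Return a cached pattern matching the given HW actions, or allocate a
 * new one: mask out the inline data of SET/ADD/INSERT_INLINE actions and
 * write the resulting pattern to ICM. Each successful call takes a
 * reference on the pattern, dropped by mlx5dr_ptrn_cache_put_pattern().
 */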
struct mlx5dr_ptrn_obj *
mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
			      u16 num_of_actions,
			      u8 *data)
{
	struct mlx5dr_ptrn_obj *pattern;
	u64 *hw_actions;
	u8 action_id;
	int i;

	mutex_lock(&mgr->modify_hdr_mutex);
	pattern = dr_ptrn_find_cached_pattern(mgr,
					      num_of_actions,
					      (__be64 *)data);
	if (!pattern) {
		/* Alloc and add new pattern to cache */
		pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
		if (!pattern)
			goto out_unlock;

		hw_actions = (u64 *)pattern->data;
		/* Mask the pattern data to create a valid pattern,
		 * since the final action is built by ORing the
		 * argument data with the pattern.
		 */
		for (i = 0; i < num_of_actions; i++) {
			action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);

			if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
			    action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
			    action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
				MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
		}

		if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
						 num_of_actions, pattern->data)) {
			refcount_dec(&pattern->refcount);
			goto free_pattern;
		}
	} else {
		refcount_inc(&pattern->refcount);
	}

	mutex_unlock(&mgr->modify_hdr_mutex);

	return pattern;

free_pattern:
	dr_ptrn_free_pattern(pattern);
out_unlock:
	mutex_unlock(&mgr->modify_hdr_mutex);
	return NULL;
}

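/* Drop a reference taken by mlx5dr_ptrn_cache_get_pattern(). The pattern
 * is freed when the last reference is released.
 */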
void
mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
			      struct mlx5dr_ptrn_obj *pattern)
{
	mutex_lock(&mgr->modify_hdr_mutex);

	if (refcount_dec_and_test(&pattern->refcount))
		dr_ptrn_free_pattern(pattern);

	mutex_unlock(&mgr->modify_hdr_mutex);
}

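/* Create the per-domain pattern manager. Returns NULL if the device does
 * not support modify-header pattern/argument objects or on allocation
 * failure.
 */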
struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_ptrn_mgr *mgr;

	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return NULL;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return NULL;

	mgr->dmn = dmn;
	mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
	if (!mgr->ptrn_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
		goto free_mgr;
	}

	INIT_LIST_HEAD(&mgr->ptrn_list);
	mutex_init(&mgr->modify_hdr_mutex);

	return mgr;

free_mgr:
	kfree(mgr);
	return NULL;
}

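/* Destroy the pattern manager. All patterns are expected to have been
 * released by now; warn and free any that are still cached, then destroy
 * the pattern ICM pool.
 */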
void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
{
	struct mlx5dr_ptrn_obj *pattern;
	struct mlx5dr_ptrn_obj *tmp;

	if (!mgr)
		return;

	WARN_ON(!list_empty(&mgr->ptrn_list));

	list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
		list_del(&pattern->list);
		kfree(pattern->data);
		kfree(pattern);
	}

	mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
	mutex_destroy(&mgr->modify_hdr_mutex);
	kfree(mgr);
}