// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_types.h"

#define DR_STE_CRC_POLY 0xEDB88320L
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2

#define DR_STE_ENABLE_FLOW_TAG BIT(31)

/* Set a specific value in the STE when spec->s_fname is set, then clear
 * spec->s_fname to mark it as consumed.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
	if ((spec)->s_fname) { \
		MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
		(spec)->s_fname = 0; \
	} \
} while (0)

/* Copy spec->s_fname into tag->t_fname */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)

/* Set bit_mask->bm_fname to all ones and mark spec->s_fname as used */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)

/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as used */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)

#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
	MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)

#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
			  in_out##_first_mpls_label);\
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
			  in_out##_first_mpls_s_bos); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
			  in_out##_first_mpls_exp); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
			  in_out##_first_mpls_ttl); \
} while (0)

#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
		       in_out##_first_mpls_label);\
	DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
		       in_out##_first_mpls_s_bos); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
		       in_out##_first_mpls_exp); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
		       in_out##_first_mpls_ttl); \
} while (0)
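/* Illustrative example (not used by the driver): for a spec with
 * tcp_dport == 80, DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport)
 * writes 80 into the dst_port field of the ste_eth_l4 tag layout and zeroes
 * spec->tcp_dport, marking the field as consumed by this builder.
 */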

#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)

#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
		   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
			  MLX5DR_STE_LU_TYPE_##lookup_type##_O)

enum dr_ste_tunl_action {
	DR_STE_TUNL_ACTION_NONE = 0,
	DR_STE_TUNL_ACTION_ENABLE = 1,
	DR_STE_TUNL_ACTION_DECAP = 2,
	DR_STE_TUNL_ACTION_L3_DECAP = 3,
	DR_STE_TUNL_ACTION_POP_VLAN = 4,
};

enum dr_ste_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
	DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
	DR_STE_ACTION_TYPE_ENCAP = 4,
};

struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};

static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
	u32 crc = crc32(0, input_data, length);

	return (__force u32)htonl(crc);
}

u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 masked[DR_STE_SIZE_TAG] = {};
	u32 crc32, index;
	u16 bit;
	int i;

	/* Don't calculate CRC if the result is predictable */
	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
		return 0;

	/* Mask the tag using the byte mask, one bit per byte */
	bit = 1 << (DR_STE_SIZE_TAG - 1);
	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
		if (htbl->byte_mask & bit)
			masked[i] = hw_ste->tag[i];

		bit = bit >> 1;
	}

	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
	index = crc32 & (htbl->chunk->num_of_entries - 1);

	return index;
}

static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
	u16 byte_mask = 0;
	int i;

	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
		byte_mask = byte_mask << 1;
		if (bit_mask[i] == 0xff)
			byte_mask |= 1;
	}
	return byte_mask;
}

void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}

void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}

void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}

void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}

void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				 bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to a HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		mlx5dr_ste_set_go_back_bit(hw_ste_p);
}
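/* Illustrative example: programming a 14-byte L2 header reformat via
 * mlx5dr_ste_set_tx_encap() below ends up as action_description = 7,
 * since the hardware takes the size in 2-byte words.
 */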

void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
	/* The hardware expects the size here in 2-byte words */
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}

void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
}

void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}

void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}

void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}

u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}

void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
				    u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}

void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}

void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
		     u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set the GVMI once, it is the same for RX/TX; bits 63_48 of the
	 * next table base / miss address encode the next GVMI.
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}

static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}

static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}

u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
{
	u64 index =
		(MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
		 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);

	return index << 6;
}

void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
}

u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
}

u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
}

struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->ste_arr;

	return &ste->htbl->miss_list[index];
}
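/* Illustrative example: the three getters above all derive the entry index
 * as (ste - ste->htbl->ste_arr), so the i-th STE consistently maps to
 * chunk->icm_addr + i * DR_STE_SIZE, chunk->mr_addr + i * DR_STE_SIZE and
 * miss_list[i].
 */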

static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
	MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}

bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}

/* Replace the relevant fields, except for:
 * htbl - keep the original htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depend on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * The data that was in b is copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;

	INIT_LIST_HEAD(&dst->rule_list);
	list_splice_tail_init(&src->rule_list, &dst->rule_list);
}

/* Free an STE that is the head and the only one in its miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use a temporary ste because dr_ste_always_miss_addr
	 * touches the bit_mask area, which doesn't exist in ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write the full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
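/* Illustrative example: freeing the only STE of a miss list (above) does not
 * clear the hash entry; it is rewritten as a full-size "always miss" STE
 * whose miss address points at the matcher's end anchor, so lookups that
 * land on this bucket keep resolving to a miss.
 */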

/* Free an STE that is the head but NOT the only one in its miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;

	next_miss_htbl = next_ste->htbl;

	/* Remove next_ste from the miss_list before the copy */
	list_del_init(&next_ste->miss_list_node);

	/* All rule-members that use next_ste should be updated accordingly */
	mlx5dr_rule_update_rule_member(next_ste, ste);

	/* Move the data from next_ste into ste */
	dr_ste_replace(ste, next_ste);

	/* Put the htbl that contained next_ste.
	 * The original htbl keeps the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
						  0, ste->hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}

/* Free an STE located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
	mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data */);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
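/* Illustrative example: removing |b| from |a| -> |b| -> |c| (above) copies
 * b's miss address into a and resends only a; b itself is merely unlinked
 * from the miss list.
 */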

void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is the head:
	 *    a. the head ste is the only ste in the miss list
	 *    b. the head ste is not the only ste in the miss list
	 * 2. ste is not the head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste, nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}

bool mlx5dr_ste_equal_tag(void *src, void *dst)
{
	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;

	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}

void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}

void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* The miss address for TX and RX STEs is located at the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}

void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
	mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
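/* Illustrative example: ICM addresses are 64-byte aligned, so
 * mlx5dr_ste_set_miss_addr() stores miss_addr >> 6 split across
 * miss_address_31_6 and miss_address_39_32, and mlx5dr_ste_get_miss_addr()
 * reverses this with index << 6.
 */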

/* The assumption here is that we don't update ste->hw_ste while the STE is
 * not in use, so it stays all zeros and checking next_lu_type is enough.
 */
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;

	if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
	    MLX5DR_STE_LU_TYPE_NOP)
		return true;

	return false;
}

bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
	return !ste->refcount;
}

/* Init one ste as a pattern for the ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	struct mlx5dr_ste ste = {};

	mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
	else
		mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
}

int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
				     nic_dmn,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}

int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u8 next_lu_type;
		u16 byte_mask;

		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write the new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}

static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int num_of_entries;

	htbl->ctrl.may_grow = true;

	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
		htbl->ctrl.may_grow = false;

	/* Threshold is 50%; one is added so a table of size 1 gets a
	 * non-zero threshold.
	 */
	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
	ctrl->increase_threshold = (num_of_entries + 1) / 2;
}
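/* Illustrative example: a 64-entry chunk gets
 * increase_threshold = (64 + 1) / 2 = 32, i.e. the table becomes a candidate
 * for growing once roughly half of it is in use.
 */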

struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u8 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
		INIT_LIST_HEAD(&ste->rule_list);
	}

	htbl->chunk_size = chunk_size;
	dr_ste_set_ctrl(htbl);
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}

int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}

int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		mlx5dr_ste_init(ste_arr,
				sb->lu_type,
				nic_dmn->ste_type,
				dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, ste_arr);
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
			MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
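/* Illustrative example: with three builders the loop above emits a chain
 * STE0 -> STE1 -> STE2, where STE0 and STE1 each carry the next builder's
 * lu_type and byte_mask, and the last STE keeps the DONT_CARE next_lu_type
 * set by mlx5dr_ste_init().
 */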

static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}

	if (mask->cvlan_tag || mask->svlan_tag) {
		pr_info("Invalid c/svlan mask configuration\n");
		return -EINVAL;
	}

	return 0;
}
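/* Illustrative example: in the bit-mask builder above the 48-bit SMAC is
 * re-split 16/32, so smac_47_16 = 0xAABBCCDD with smac_15_0 = 0xEEFF is
 * written as smac_47_32 = 0xAABB and smac_31_0 = 0xCCDDEEFF.
 */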

static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}

static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
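/* Illustrative example: dr_ste_copy_mask_spec() reads the 128-bit IPv6
 * addresses as four big-endian words, so a mask of ::1 becomes
 * src_ip_127_96..src_ip_63_32 = 0 and src_ip_31_0 = 1 after be32_to_cpu().
 */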

static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}

static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
}
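/* Illustrative example: if the caller's match_buf covers only part of a
 * layout, mlx5dr_ste_copy_param() below copies the partial bytes into the
 * zeroed tail_param scratch buffer, so the per-layout copy helpers always
 * read a full-sized, zero-padded structure.
 */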

void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data, mask->match_sz);
			buff = tail_param;
		} else {
			buff = mask->match_buf;
		}
		dr_ste_copy_mask_spec(buff, &set_param->outer);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc(buff, &set_param->misc);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_spec(buff, &set_param->inner);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc2)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc2(buff, &set_param->misc2);
	}

	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc3)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
	}
}

static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *hw_ste_p)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
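/* Illustrative example: the spec carries ip_version as the literal IP
 * version (0x4/0x6), while the STE l3_type field uses the hardware encoding
 * STE_IPV4/STE_IPV6 (0x1/0x2), which is why the tag builders translate the
 * value instead of copying it.
 */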

int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	int ret;

	ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
	if (ret)
		return ret;

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;

	return 0;
}

static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
}

static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}

void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
}

static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
}
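/* Illustrative example (assuming DR_STE_SIZE_MASK is 16):
 * dr_ste_conv_bit_to_byte_mask() folds a bit_mask whose first two bytes are
 * 0xff (and the rest zero) into byte_mask = 0xc000 -- one bit per fully
 * masked byte, MSB first.
 */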

static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *hw_ste_p)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}

void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
}

static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
						      bool inner,
						      u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_address, mask, dst_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_address, mask, src_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  ecn, mask, ip_ecn);

	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
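/* Illustrative example: tcp_dport and udp_dport both map to the single
 * destination_port field of the 5-tuple STE (and likewise for source_port);
 * a mask is expected to set only one of them, with ip_protocol
 * disambiguating the match.
 */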

static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}

void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
}

static void
dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, outer_second_prio);
	}
}
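/* Illustrative example: on the mask side the second VLAN qualifier is set to
 * all ones whenever either second_cvlan_tag or second_svlan_tag is masked;
 * the tag side below then writes the concrete qualifier, DR_STE_CVLAN or
 * DR_STE_SVLAN.
 */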

static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
					      bool inner, u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}

static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
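/* Illustrative example: the eth_l2_src and eth_l2_dst builders add only
 * their own MAC fields and delegate the shared VLAN/ethertype/ip_version
 * handling to dr_ste_build_eth_l2_src_or_dst_tag() above.
 */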

static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
}

void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
}

static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}

static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
}

void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
}
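/* Illustrative example: in the tunneling builder below, a VXLAN VNI of
 * 0x123456 is written as l2_tunneling_network_id = 0x12345600, since the
 * 24-bit VNI occupies the upper bits of the 32-bit field.
 */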

static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}

static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc *misc = &value->misc;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
}
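/* Note: unlike the other L2 builders, eth_l2_tnl above hard-codes
 * MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I instead of deriving the lookup type
 * from rx/inner via DR_STE_CALC_LU_TYPE().
 */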
static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
						   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
}

static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}

void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
}

static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);

	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
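
/*
 * The eth_l4 tag has a single src_port/dst_port pair, so the TCP and UDP
 * ports below are written to the same fields; the ip_protocol field is
 * what is expected to disambiguate them at lookup time.
 */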
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}

void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
}

static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *hw_ste_p)
{
	return 0;
}

void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}

static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
				       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (inner)
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
	else
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
}

static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
	u8 *tag = hw_ste->tag;

	if (sb->inner)
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
	else
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);

	return 0;
}

void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
}
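
/*
 * GRE builder: matches the GRE protocol field, the C/K/S present bits
 * and both halves of the optional GRE key.
 */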
static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
				      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);

	DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
}

static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
				struct mlx5dr_ste_build *sb,
				u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc *misc = &value->misc;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);

	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);

	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);

	return 0;
}

void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_gre_tag;
}

static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
}
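
/*
 * Flex parser 0 carries the first MPLS label of an MPLS-over-GRE or
 * MPLS-over-UDP tunnel. Both cases reuse the same parser_3 fields;
 * whichever of the two mask sets is populated selects the source, with
 * MPLS-over-GRE taking precedence.
 */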
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
	u8 *tag = hw_ste->tag;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
	return 0;
}

void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
}

#define ICMP_TYPE_OFFSET_FIRST_DW		24
#define ICMP_CODE_OFFSET_FIRST_DW		16
#define ICMP_HEADER_DATA_OFFSET_SECOND_DW	0

static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
	bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
	u32 icmp_header_data_mask;
	u32 icmp_type_mask;
	u32 icmp_code_mask;
	int dw0_location;
	int dw1_location;

	if (is_ipv4_mask) {
		icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
		icmp_type_mask = misc_3_mask->icmpv4_type;
		icmp_code_mask = misc_3_mask->icmpv4_code;
		dw0_location = caps->flex_parser_id_icmp_dw0;
		dw1_location = caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
		icmp_type_mask = misc_3_mask->icmpv6_type;
		icmp_code_mask = misc_3_mask->icmpv6_code;
		dw0_location = caps->flex_parser_id_icmpv6_dw0;
		dw1_location = caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_type = 0;
			else
				misc_3_mask->icmpv6_type = 0;
		}
		if (icmp_code_mask) {
			u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_code = 0;
			else
				misc_3_mask->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
				 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_header_data = 0;
			else
				misc_3_mask->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
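
/*
 * ICMP type and code share the first flex parser dword (type in the top
 * byte, code just below it), while the header data occupies the whole
 * second dword. Only parser locations 4 and 5 are handled here; any
 * other location reported by the device caps is rejected with -EINVAL.
 */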
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u8 *tag = hw_ste->tag;
	u32 icmp_header_data;
	int dw0_location;
	int dw1_location;
	u32 icmp_type;
	u32 icmp_code;
	bool is_ipv4;

	is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data = misc_3->icmpv4_header_data;
		icmp_type = misc_3->icmpv4_type;
		icmp_code = misc_3->icmpv4_code;
		dw0_location = sb->caps->flex_parser_id_icmp_dw0;
		dw1_location = sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data = misc_3->icmpv6_header_data;
		icmp_type = misc_3->icmpv6_type;
		icmp_code = misc_3->icmpv6_code;
		dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_type = 0;
			else
				misc_3->icmpv6_type = 0;
		}

		if (icmp_code) {
			u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_code = 0;
			else
				misc_3->icmpv6_code = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
				 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4)
				misc_3->icmpv4_header_data = 0;
			else
				misc_3->icmpv6_header_data = 0;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_cmd_caps *caps,
				   bool inner, bool rx)
{
	int ret;

	ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
	if (ret)
		return ret;

	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;

	return 0;
}

static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(general_purpose, bit_mask,
			  general_purpose_lookup_field, misc_2_mask,
			  metadata_reg_a);
}

static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc_2_mask, metadata_reg_a);

	return 0;
}

void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
}
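
/*
 * ETH L4 misc builder: matches the TCP sequence and acknowledgment
 * numbers, taken from the inner or outer header according to sb->inner.
 */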
static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
					      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;

	if (inner) {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
				  inner_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
				  inner_tcp_ack_num);
	} else {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
				  outer_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
				  outer_tcp_ack_num);
	}
}

static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 *tag = hw_ste->tag;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}

void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
}

static void
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;

	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_flags,
			  misc_3_mask, outer_vxlan_gpe_flags);
	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_next_protocol,
			  misc_3_mask, outer_vxlan_gpe_next_protocol);
	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
			  outer_vxlan_gpe_vni,
			  misc_3_mask, outer_vxlan_gpe_vni);
}

static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}

void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
						struct mlx5dr_match_param *mask,
						bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
							sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
}
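
/*
 * Geneve tunnel builder: unlike the VXLAN-GPE builder above there is no
 * inner variant, so the bit-mask helper takes no 'inner' argument and
 * always matches the outer Geneve header.
 */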
static void
dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_protocol_type,
			  misc_mask, geneve_protocol_type);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_oam,
			  misc_mask, geneve_oam);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_opt_len,
			  misc_mask, geneve_opt_len);
	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
			  geneve_vni,
			  misc_mask, geneve_vni);
}

static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc *misc = &value->misc;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}

void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
}

static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
			  misc_2_mask, metadata_reg_c_0);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
			  misc_2_mask, metadata_reg_c_1);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
			  misc_2_mask, metadata_reg_c_2);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
			  misc_2_mask, metadata_reg_c_3);
}

static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}

void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
}
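
/*
 * Second steering-registers builder: metadata registers c_4..c_7, packed
 * two per hardware register as high/low halves, mirroring register_0
 * above.
 */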
static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
			  misc_2_mask, metadata_reg_c_4);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
			  misc_2_mask, metadata_reg_c_5);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
			  misc_2_mask, metadata_reg_c_6);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
			  misc_2_mask, metadata_reg_c_7);
}

static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *tag = hw_ste->tag;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}

void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);

	sb->rx = rx;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
}

static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
					      u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	/* Partial misc source_port is not supported */
	if (misc_mask->source_port && misc_mask->source_port != 0xffff)
		return -EINVAL;

	/* Partial misc source_eswitch_owner_vhca_id is not supported */
	if (misc_mask->source_eswitch_owner_vhca_id &&
	    misc_mask->source_eswitch_owner_vhca_id != 0xffff)
		return -EINVAL;

	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;

	return 0;
}

static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					 struct mlx5dr_ste_build *sb,
					 u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 *bit_mask = sb->bit_mask;
	u8 *tag = hw_ste->tag;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			caps = &dmn->info.caps;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			caps = &dmn->peer_dmn->info.caps;
		else
			return -EINVAL;
	} else {
		caps = &dmn->info.caps;
	}

	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
	if (!vport_cap)
		return -EINVAL;

	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (vport_cap->vport_gvmi && source_gvmi_set)
		MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

	misc->source_eswitch_owner_vhca_id = 0;
	misc->source_port = 0;

	return 0;
}
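
/*
 * Source GVMI/QP builder: only an exact (0xffff) or empty mask is
 * supported for source_port and source_eswitch_owner_vhca_id, because
 * the port is translated into a GVMI through the (peer) domain caps
 * rather than matched verbatim.
 */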
int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  struct mlx5dr_domain *dmn,
				  bool inner, bool rx)
{
	int ret;

	/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
	if (ret)
		return ret;

	sb->rx = rx;
	sb->dmn = dmn;
	sb->inner = inner;
	sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;

	return 0;
}
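
/*
 * Usage note (illustrative sketch only; the sb array, index and mask
 * below are hypothetical caller-owned variables): a matcher that wants
 * to match on the outer source MAC plus L4 ports would chain builders
 * along the lines of:
 *
 *	mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, false, rx);
 *	mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, false, rx);
 *
 * Each builder consumes (zeroes) the mask fields it handles, so once the
 * chain has run, any field still set in the mask is one that no selected
 * lookup type could match.
 */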