// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
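/* Worked example (illustrative): for VSI index 5 and a non-tunneled
 * ICE_FLTR_PTYPE_NONF_IPV4_TCP flow (tun_offs = 0):
 *
 *	ICE_FLOW_PROF_FD(5, ICE_FLTR_PTYPE_NONF_IPV4_TCP, 0)
 *		= (5ULL << 32) | ICE_FLTR_PTYPE_NONF_IPV4_TCP
 *
 * Tunneled filters pass tun_offs = ICE_FLTR_PTYPE_MAX, so the tunneled and
 * non-tunneled profiles of the same flow type never share a profile ID.
 */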
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_NONE,
};

struct virtchnl_fdir_pattern_match_item {
	enum virtchnl_proto_hdr_type *list;
	u64 input_set;
	u64 *meta;
};

static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
	{vc_pattern_ipv4, 0, NULL},
	{vc_pattern_ipv4_tcp, 0, NULL},
	{vc_pattern_ipv4_udp, 0, NULL},
	{vc_pattern_ipv4_sctp, 0, NULL},
	{vc_pattern_ipv6, 0, NULL},
	{vc_pattern_ipv6_tcp, 0, NULL},
	{vc_pattern_ipv6_udp, 0, NULL},
	{vc_pattern_ipv6_sctp, 0, NULL},
};

static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
	{vc_pattern_ipv4, 0, NULL},
	{vc_pattern_ipv4_tcp, 0, NULL},
	{vc_pattern_ipv4_udp, 0, NULL},
	{vc_pattern_ipv4_sctp, 0, NULL},
	{vc_pattern_ipv6, 0, NULL},
	{vc_pattern_ipv6_tcp, 0, NULL},
	{vc_pattern_ipv6_udp, 0, NULL},
	{vc_pattern_ipv6_sctp, 0, NULL},
	{vc_pattern_ether, 0, NULL},
	{vc_pattern_ipv4_gtpu, 0, NULL},
	{vc_pattern_ipv4_gtpu_eh, 0, NULL},
	{vc_pattern_ipv4_l2tpv3, 0, NULL},
	{vc_pattern_ipv6_l2tpv3, 0, NULL},
	{vc_pattern_ipv4_esp, 0, NULL},
	{vc_pattern_ipv6_esp, 0, NULL},
	{vc_pattern_ipv4_ah, 0, NULL},
	{vc_pattern_ipv6_ah, 0, NULL},
	{vc_pattern_ipv4_nat_t_esp, 0, NULL},
	{vc_pattern_ipv6_nat_t_esp, 0, NULL},
	{vc_pattern_ipv4_pfcp, 0, NULL},
	{vc_pattern_ipv6_pfcp, 0, NULL},
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};
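/* Note on flag/mask below: they disambiguate map entries that share one
 * virtchnl field. Both ESP_SPI rows carry FDIR_INSET_FLAG_ESP_M in their
 * mask, and ice_vc_fdir_parse_flow_fld() only takes a row whose flag matches
 * the inset_flag recorded while parsing the pattern. So an SPI carried over
 * UDP (NAT-T) selects ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, while plain IPsec
 * ESP selects ICE_FLOW_FIELD_IDX_ESP_SPI.
 */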
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!pf->vsi[vf->lan_vsi_idx])
		return -EINVAL;

	return 0;
}
/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}
/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store them into field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store them into
 * flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
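/* Illustrative sketch: a VF rule matching IPv4 source and destination
 * addresses arrives with VIRTCHNL_PROTO_HDR_IPV4_SRC and
 * VIRTCHNL_PROTO_HDR_IPV4_DST set in the IPv4 header's field_selector.
 * ice_vc_fdir_parse_flow_fld() then collects
 * fld[] = { ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA }, and
 * each entry is programmed as a full-field match, since all three offsets
 * passed to ice_flow_set_fld() above are ICE_FLOW_FLD_OFF_INVAL.
 */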
/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}
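/* Teardown above mirrors ice_vc_fdir_write_flow_prof() below: every flow
 * entry is removed from each VSI it was added to, then the profile itself
 * is freed and the cached segment released. prof_entry_cnt gates re-entry,
 * so removing a flow that has no programmed entries is a no-op.
 */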
/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	enum ice_status status;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				   tun + 1, &prof);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				    vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry1_h);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				    ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				    seg, &entry2_h);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}
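/* Two flow entries are installed per profile above: one on the VF's own
 * VSI and one on its control VSI. Programming descriptors are queued
 * through the control VSI (see ice_vc_fdir_write_fltr()), while matched
 * traffic is steered on the VF VSI, so both must reference the profile;
 * vsi_h[]/entry_h[] record the pair for later teardown.
 */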
/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_match_pattern
 * @fltr: virtual channel add cmd buffer
 * @type: virtual channel protocol filter header type
 *
 * Match the filter's protocol header chain against a candidate pattern by
 * comparing the header types in order.
 *
 * Return: true if the filter matches the pattern, false otherwise.
 */
static bool
ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
			  enum virtchnl_proto_hdr_type *type)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int i = 0;

	while ((i < proto->count) &&
	       (*type == proto->proto_hdr[i].type) &&
	       (*type != VIRTCHNL_PROTO_HDR_NONE)) {
		type++;
		i++;
	}

	return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
}
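/* Example walk (illustrative): a VF rule whose proto_hdrs are
 * [ETH, IPV4, UDP] is compared against vc_pattern_ipv4_udp. Each entry
 * matches in order, and when the candidate list reaches
 * VIRTCHNL_PROTO_HDR_NONE we also have i == proto->count, so
 * ice_vc_fdir_match_pattern() returns true; any extra, missing, or
 * reordered header fails the walk.
 */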
/**
 * ice_vc_fdir_get_pattern - get the allowed pattern list
 * @vf: pointer to the VF info
 * @len: filter list length
 *
 * Return: pointer to allowed filter list
 */
static const struct virtchnl_fdir_pattern_match_item *
ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
{
	const struct virtchnl_fdir_pattern_match_item *item;
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
		     sizeof(hw->active_pkg_name))) {
		item = vc_fdir_pattern_comms;
		*len = ARRAY_SIZE(vc_fdir_pattern_comms);
	} else {
		item = vc_fdir_pattern_os;
		*len = ARRAY_SIZE(vc_fdir_pattern_os);
	}

	return item;
}

/**
 * ice_vc_fdir_search_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 *
 * Search the supported pattern list for a matching pattern
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
{
	const struct virtchnl_fdir_pattern_match_item *pattern;
	int len, i;

	pattern = ice_vc_fdir_get_pattern(vf, &len);

	for (i = 0; i < len; i++)
		if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
			return 0;

	return -EINVAL;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* note the L4 type so a trailing ESP header is
			 * classified as NAT-T rather than plain ESP
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count 0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type 0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
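/* Typical action set (illustrative): a VF steering a flow to queue 3 and
 * tagging it sends one VIRTCHNL_ACTION_QUEUE action (queue.index = 3) plus
 * one VIRTCHNL_ACTION_MARK action. The checks in
 * ice_vc_fdir_parse_action() above require exactly one destination action
 * (passthru/drop/queue/q_region) and allow at most one mark; any other
 * combination is rejected with -EINVAL.
 */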
/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	int ret;

	ret = ice_vc_fdir_search_pattern(vf, fltr);
	if (ret)
		return ret;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if a rule with the same conf value already exists for this VF
 *
 * Return: true if a duplicate rule is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}
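/* Note: ice_vc_fdir_comp_rules() compares only the match portion of a rule
 * (tunnel type, flow type, keys and masks), not its action, so two requests
 * that differ only in destination queue or mark ID are treated as
 * duplicates and the second add is rejected.
 */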
/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR conf entry on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}
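/* Rule ID lifecycle: idr_alloc() in ice_vc_fdir_insert_entry() hands out
 * the flow_id that is reported back to the VF in the add response; the VF
 * quotes the same flow_id in VIRTCHNL_OP_DEL_FDIR_FILTER, where
 * ice_vc_fdir_lookup_entry() resolves it back to the conf entry.
 */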
/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies delete rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	enum ice_status status;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}
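/* Completion handshake: ice_vc_fdir_write_fltr() only queues a programming
 * descriptor; the request finishes asynchronously. ctx_irq is armed with a
 * timer before the write (see ice_vc_fdir_set_irq_ctx()). Either the ctrl
 * VSI Rx interrupt below or the timer above then moves the context to
 * ctx_done (stat = ICE_FDIR_CTX_IRQ or ICE_FDIR_CTX_TIMEOUT), sets
 * ICE_FD_VF_FLUSH_CTX, and lets the service task reply to the VF from
 * ice_flush_fdir_ctx().
 */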
/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	vf = &pf->vf[ctrl_vsi->vf_id];

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc indicates add, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc indicates del, but ctx does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}
/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	int i;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf *vf = &pf->vf[i];
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}
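/* Only one FDIR request per VF may be in flight: ice_vc_fdir_set_irq_ctx()
 * returns -EBUSY while either ctx_irq or ctx_done is still marked valid,
 * and the ~10 ms rx_tmr bounds how long a request waits for the hardware
 * programming completion before it is failed with
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT.
 */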
/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		/* conf was linked into the rule list above; unlink it before
		 * it is freed so no stale pointer is left behind
		 */
		ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_rem_entry;
	}

exit:
	kfree(stat);
	return ret;

err_rem_entry:
	ice_vc_fdir_clear_irq_ctx(vf);
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
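/* Illustrative sketch (VF side, not part of this PF driver): the
 * validate-then-program flow that ice_vc_add_fdir_fltr() serves. Only
 * vsi_id, validate_only, flow_id and status are fields this file relies
 * on; the transport helper below is an assumption.
 *
 *	struct virtchnl_fdir_add req = {};
 *
 *	req.vsi_id = vf_vsi_id;
 *	req.validate_only = 1;	// dry run: rule is checked, not written
 *	// fill in the rule pattern and action (see virtchnl.h)
 *	send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &req, sizeof(req));
 *
 *	req.validate_only = 0;	// same rule, now actually programmed
 *	send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &req, sizeof(req));
 *	// on success the reply carries flow_id for a later delete
 */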
/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
}
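/* Illustrative sketch: the flow_id handles handed back to the VF come
 * from fdir_rule_idr, so insert/lookup/remove presumably reduce to the
 * standard idr pattern (the bounds below are assumptions, not taken from
 * this file):
 *
 *	id = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0, INT_MAX, GFP_KERNEL);
 *	conf = idr_find(&vf->fdir.fdir_rule_idr, flow_id);
 *	idr_remove(&vf->fdir.fdir_rule_idr, flow_id);
 */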
/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}
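/* Usage note (sketch): ice_vf_fdir_init()/ice_vf_fdir_exit() are expected
 * to bracket a VF's lifetime; the call sites below are assumptions, not
 * taken from this file:
 *
 *	ice_vf_fdir_init(vf);		// at VF creation
 *	//  ... VIRTCHNL_OP_ADD/DEL_FDIR_FILTER traffic ...
 *	ice_vf_fdir_exit(vf);		// at VF teardown: flush rules, then
 *					// drop the idr and flow profiles
 */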