// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	       (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
	 FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
	 FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
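/* Note: VIRTCHNL_PROTO_HDR_ESP_SPI appears twice above. The flag/mask pair
 * disambiguates the two mappings: when the rule carries ESP over UDP the
 * pattern parser sets FDIR_INSET_FLAG_ESP_UDP in conf->inset_flag and the
 * SPI maps to the NAT-T ESP field; for plain IPsec ESP it maps to the
 * native ESP SPI field. All other entries use a zero mask and always match.
 */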
/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}
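/* The per-VF control VSI allocated above is the channel through which FDIR
 * rules are programmed: ice_vc_fdir_write_fltr() posts programming
 * descriptors on its Tx queue, and the hardware reports programming status
 * back on its Rx queue (see ice_vc_fdir_irq_handler()).
 */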
/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}
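/* ice_vc_fdir_parse_flow_fld() below works on a local copy of the protocol
 * header: each selector bit that matches an fdir_inset_map entry is cleared
 * from the copy, so the scan terminates as soon as every requested field
 * has been consumed.
 */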
/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store them into field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store them into
 * flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}
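/* ice_vc_fdir_set_flow_hdr() below maps each flow type to the set of
 * segment headers the flow engine must match; an IPv4/UDP filter, for
 * instance, selects ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
 * ICE_FLOW_SEG_HDR_IPV_OTHER.
 */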
/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}
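/* A more specific L4 rule (TCP/UDP/SCTP) and an *_OTHER rule of the same IP
 * version are treated as conflicting below, since an IPv4/IPv6 "other"
 * profile would also cover the L4 traffic and the two rules would compete
 * for the same hardware profile.
 */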
/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has conflicting profile with existing profiles
 *
 * Return: true if a profile conflict exists, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}
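/* Profile IDs are built with ICE_FLOW_PROF_FD(): the VSI number occupies
 * the upper 32 bits and the flow type the lower 32; tunnel rules add
 * ICE_FLTR_PTYPE_MAX to the flow type so the tunnel and non-tunnel
 * profiles of the same flow get distinct IDs. Each profile carries two
 * flow entries, one for the VF's data VSI and one for its control VSI.
 */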
/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}
/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}
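/* While walking the protocol headers below, the parser tracks the most
 * recently seen L3 and L4 header types (l3/l4) so that compound flow types
 * such as NAT-T ESP (UDP followed by ESP) or PFCP over IPv6 can be
 * classified correctly.
 */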
/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* remember L4 so a following ESP header is
			 * classified as NAT-T ESP
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}
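/* ice_vc_fdir_parse_action() below enforces that a rule carries exactly one
 * destination action (passthrough, drop, queue or queue region) and at most
 * one mark action.
 */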
/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}
/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is duplicated rule with same conf value
 *
 * Return: true if a duplicate rule exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}
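/* The IDR key allocated in ice_vc_fdir_insert_entry() is the flow_id that
 * is handed back to the VF in the add response; delete requests pass it
 * back, and ice_vc_fdir_lookup_entry() resolves it to the stored conf.
 */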
/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR conf entry on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies delete rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}
/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor reports add, but context is not an add request\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor reports del, but context is not a del request\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}
/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
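/* Only one FDIR request per VF may be outstanding at a time:
 * ice_vc_fdir_set_irq_ctx() refuses with -EBUSY while either ctx_irq or
 * ctx_done is still marked valid, and arms a 10 ms timer as the completion
 * deadline for the programming descriptor.
 */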
/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
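/* Add path: the request is validated and parsed into a conf, the input set
 * is programmed (profile plus flow entries), duplicates are rejected, the
 * conf is inserted into the rule list/IDR, the IRQ context is armed, and
 * only then is the programming descriptor written. The success response to
 * the VF is deferred until ice_flush_fdir_ctx() processes the completion.
 */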
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}