// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	       (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check the validity of the VSI ID, the PF state and the VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!pf->vsi[vf->lan_vsi_idx])
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate the ctrl_vsi on first use and open its port for the VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
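 *
 * Note: both the per-VF fdir_prof table and the per-flow profile entry
 * are allocated lazily, on the first filter that needs them.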
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}

/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store the
 * fields into the flow's packet segment info
 *
 * Return: 0 on success, and other on error.
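 *
 * Note: every selected field is programmed with ICE_FLOW_FLD_OFF_INVAL
 * offsets, i.e. at its default location and with range matching disabled.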
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Configure the input set type and value from the virtual channel add msg
 * buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
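 *
 * Note: the outer L3 (IPv4/IPv6) and L4 header types seen while walking the
 * pattern are tracked so that later headers such as ESP, AH and PFCP can
 * select the matching flow type.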
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
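				/* SCTP ports are stored in the same L4 port
				 * fields of the input set as TCP/UDP
				 */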
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules carry the same value, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the FDIR filter config on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
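	/* post processing of the timed-out request is deferred to the PF
	 * service task, which picks the context up via ice_flush_fdir_ctx()
	 */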
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	vf = &pf->vf[ctrl_vsi->vf_id];

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
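 *
 * Note: the result is decoded from the descriptor write-back fields: the
 * Descriptor Done bit, the programming ID (add vs. delete) and the
 * failure bits.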
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows add, but ctx is not add\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc shows del, but ctx is not del\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d: Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d: Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all the pending events on the ctx_done list and process them.
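 *
 * Note: this runs from the PF service task, which the IRQ handler and the
 * timeout timer schedule after handing a request over to ctx_done.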
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	int i;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf *vf = &pf->vf[i];
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
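 *
 * Stop the programming timer and mark the IRQ context as no longer valid.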
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		/* conf is already on the rule list; unlink it before
		 * err_free_conf frees it
		 */
		ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
		goto err_free_conf;
	}

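	/* program the rule through the ctrl_vsi; the result arrives
	 * asynchronously as a programming status descriptor handled by
	 * ice_vc_fdir_irq_handler(), or times out via ice_vf_fdir_timer()
	 */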
	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_rem_entry;
	}

exit:
	kfree(stat);
	return ret;

err_rem_entry:
	ice_vc_fdir_clear_irq_ctx(vf);
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}