// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	      (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
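
/* Worked example of the encoding above: a non-tunnel IPv4/TCP rule on VSI
 * index 5 yields prof_id = ((u64)5 << 32) | ICE_FLTR_PTYPE_NONF_IPV4_TCP,
 * i.e. the VSI index occupies bits [63:32] and (flow + tun_offs) occupies
 * bits [31:0].
 */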

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
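
/* A note on the flag/mask pairs above: entries with {0, 0} always match.
 * The two VIRTCHNL_PROTO_HDR_ESP_SPI entries are disambiguated by flag:
 * ice_vc_fdir_parse_flow_fld() checks (mask & conf->inset_flag) == flag, so
 * the SPI of plain IPsec ESP maps to ICE_FLOW_FIELD_IDX_ESP_SPI while the
 * SPI of UDP-encapsulated (NAT-T) ESP maps to
 * ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, depending on the inset_flag recorded
 * during pattern parsing.
 */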

/**
 * ice_vc_fdir_param_check - check FDIR request parameters
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, and for the PF's and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi - start the VF's control VSI
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi on first use and open the ctrl_vsi port for the VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}

/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all the profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}
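
/* ice_vc_fdir_parse_flow_fld() below works on a local copy of the protocol
 * header and clears each field selector bit once it is consumed, so the
 * scan loop terminates as soon as every requested field has been mapped.
 * Field bits that have no entry in fdir_inset_map are left set and are
 * silently ignored.
 */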

/**
 * ice_vc_fdir_parse_flow_fld - parse flow fields from a protocol header
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the matched fields into
 * the field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld - set the flow's input set fields
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it into
 * the flow's packet segment fields
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
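
/* Tunnel and non-tunnel rules of the same flow type must not collide in the
 * profile ID space, so ICE_FLOW_PROF_FD() is invoked with a tun_offs of
 * ICE_FLTR_PTYPE_MAX for tunnel filters (and 0 otherwise), shifting tunnel
 * profiles into a disjoint ID range.
 */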

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
 * @fdir: pointer to the VF FDIR structure
 */
static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		fdir->fdir_fltr_cnt[flow][0] = 0;
		fdir->fdir_fltr_cnt[flow][1] = 0;
	}
}
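
/* On the conflict matrix below: within one IP version, a UDP/TCP/SCTP rule
 * and a corresponding *_OTHER rule are treated as mutually exclusive,
 * presumably because the "other" profile also covers those L4 packet types
 * in hardware, so programming both would make rule matching ambiguous.
 */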

/**
 * ice_vc_fdir_has_prof_conflict - check for flow profile conflicts
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict exists, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}
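
/* Each flow profile gets two hardware entries below: entry1_h on the VF's
 * data VSI, which steers matching traffic, and entry2_h on the VF's control
 * VSI, the VSI used to submit FDIR programming descriptors. Both handles
 * are recorded in vf_prof so that ice_vc_fdir_rem_prof() can undo them.
 */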

/**
 * ice_vc_fdir_write_flow_prof - write the flow's profile into hardware
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}
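
/* Note the -EEXIST handling in ice_vc_fdir_config_input_set(): when
 * ice_vc_fdir_write_flow_prof() reports that an identical packet segment is
 * already installed, the freshly built seg is freed and the add continues
 * with the existing profile; any other error frees seg and is propagated.
 */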

/**
 * ice_vc_fdir_config_input_set - config the flow's input set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for the virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}
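
/* Pattern parsing below is order dependent: l3 tracks the most recent IP
 * header so that L4 headers inherit the IPv4 vs IPv6 flow type, l4 records
 * a preceding UDP header so that a following ESP header is classified as
 * NAT-T ESP, and the PFCP S bit in the first payload byte selects the NODE
 * vs SESSION flow type.
 */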

/**
 * ice_vc_fdir_parse_pattern - parse the virtual channel filter's pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store the result into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}

			l4 = VIRTCHNL_PROTO_HDR_UDP;
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}
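
/* Action parsing enforces two invariants, checked after the loop below: a
 * rule must carry exactly one destination action (PASSTHRU, DROP, QUEUE or
 * Q_REGION) and at most one MARK action.
 */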

/**
 * ice_vc_fdir_parse_action - parse the virtual channel filter's actions
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's actions and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}
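
/* Duplicate detection compares match criteria only (tunnel type, flow type
 * and the data/mask fields); actions are not compared. Two rules matching
 * the same packets but differing only in action are therefore considered
 * duplicates by ice_vc_fdir_is_dup_fltr().
 */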

/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules have identical match criteria, false
 * otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr - check for duplicated filter rules
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is a duplicated rule with the same conf value
 *
 * Return: true if a duplicate exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
			to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry - insert an FDIR conf entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}

/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: conf entry on success, and NULL on error.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}
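
/* Writing a rule is a two-part operation: a filter programming descriptor
 * built by ice_fdir_get_prgm_desc() plus a generated "training" packet that
 * embodies the rule, both submitted on the VF's control VSI through
 * ice_prgm_fdir_fltr(). Hardware confirms the programming asynchronously
 * via that VSI's Rx queue (see ice_vc_fdir_irq_handler()).
 */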

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies delete rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}
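
/* Completion handling uses two contexts guarded by fdir->ctx_lock: ctx_irq
 * is armed when a request is issued, and either the ctrl VSI Rx interrupt
 * or the timeout timer below hands it over to ctx_done (stat set to IRQ or
 * TIMEOUT respectively), sets ICE_FD_VF_FLUSH_CTX and schedules the PF
 * service task, which completes the request in ice_flush_fdir_ctx().
 */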

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
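
/* ice_vf_verify_rx_desc() decodes the FDIR programming status writeback:
 * the descriptor-done bit must be set, the programming ID must match the
 * pending virtchnl opcode (add vs del), and the FAIL/FAIL_PROF bits are
 * translated into VIRTCHNL_FDIR_FAILURE_* codes for the VF.
 */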

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor shows add, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor shows del, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post - post process for adding an FDIR filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post - post process for deleting an FDIR filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail\n", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
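
/* ice_flush_fdir_ctx() runs from the PF service task whenever
 * ICE_FD_VF_FLUSH_CTX is set: it walks all active VFs, consumes a valid
 * ctx_done, verifies the writeback descriptor (or reports a timeout), and
 * dispatches to the add/del post handlers above with the resulting status.
 */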

/**
 * ice_flush_fdir_ctx - flush pending FDIR completion contexts
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
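
/* Only one FDIR request may be outstanding per VF: ice_vc_fdir_set_irq_ctx()
 * returns -EBUSY while either context is still marked valid, and it arms a
 * 10 ms timer as the upper bound on how long hardware may take to confirm
 * the programming before the request is failed with a timeout.
 */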

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
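
/* Add path overview: validate the request and rule, allocate a conf entry,
 * configure the input set (flow profile and entries), reject duplicates,
 * allocate a flow ID from the IDR, arm the completion context and finally
 * write the filter to hardware. The success reply to the VF is sent later
 * from ice_vc_add_fdir_fltr_post(); only validate_only requests and errors
 * are answered synchronously here.
 */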

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
	ice_vc_fdir_reset_cnt_all(fdir);
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}