// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

/* Recover the wrapping virtchnl_fdir_fltr_conf from the embedded
 * ice_fdir_fltr member that is linked on vf->fdir.fdir_rule_list.
 */
#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S 0
#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S 32
#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	(((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

/* Byte offset of the TEID field within a GTP-U header buffer */
#define GTPU_TEID_OFFSET 4
/* Byte offset and mask of the QFI field within a GTP-U extension header */
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
/* Bit position and mask of the S flag in the first PFCP header byte */
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
/* PFCP well-known UDP port number */
#define PFCP_PORT_NR 8805

/* inset_flag bit that distinguishes UDP-encapsulated (NAT-T) ESP from
 * plain IPsec ESP when selecting the SPI input-set field.
 */
#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

/* Per-rule state tracked for each VF flow director filter */
struct virtchnl_fdir_fltr_conf {
	/* parsed filter input set; also the list node via fltr_node */
	struct ice_fdir_fltr input;
	/* tunnel classification derived from the virtchnl pattern */
	enum ice_fdir_tunnel_type ttype;
	/* FDIR_INSET_FLAG_* bits accumulated while parsing the pattern */
	u64 inset_flag;
	/* presumably the ID handed back to the VF; not used in this chunk —
	 * TODO confirm against the add/del handlers
	 */
	u32 flow_id;
};

/* One row mapping a virtchnl protocol-header field to an ice flow field.
 * When @mask is non-zero the row only applies if
 * (conf->inset_flag & mask) == flag (see ice_vc_fdir_parse_flow_fld()).
 */
struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	/* ESP SPI maps to a different ice field depending on whether the
	 * rule was parsed as plain IPsec ESP or UDP-encapsulated (NAT-T) ESP.
	 */
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	/* PFCP is matched via its well-known UDP destination port */
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};

/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for the valid VSI ID, PF's state and VF's state
 *
 * Return: 0 on success, and -EINVAL on error.
95 */ 96 static int 97 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) 98 { 99 struct ice_pf *pf = vf->pf; 100 101 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) 102 return -EINVAL; 103 104 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 105 return -EINVAL; 106 107 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) 108 return -EINVAL; 109 110 if (vsi_id != vf->lan_vsi_num) 111 return -EINVAL; 112 113 if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) 114 return -EINVAL; 115 116 if (!pf->vsi[vf->lan_vsi_idx]) 117 return -EINVAL; 118 119 return 0; 120 } 121 122 /** 123 * ice_vf_start_ctrl_vsi 124 * @vf: pointer to the VF structure 125 * 126 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF 127 * 128 * Return: 0 on success, and other on error. 129 */ 130 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf) 131 { 132 struct ice_pf *pf = vf->pf; 133 struct ice_vsi *ctrl_vsi; 134 struct device *dev; 135 int err; 136 137 dev = ice_pf_to_dev(pf); 138 if (vf->ctrl_vsi_idx != ICE_NO_VSI) 139 return -EEXIST; 140 141 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); 142 if (!ctrl_vsi) { 143 dev_dbg(dev, "Could not setup control VSI for VF %d\n", 144 vf->vf_id); 145 return -ENOMEM; 146 } 147 148 err = ice_vsi_open_ctrl(ctrl_vsi); 149 if (err) { 150 dev_dbg(dev, "Could not open control VSI for VF %d\n", 151 vf->vf_id); 152 goto err_vsi_open; 153 } 154 155 return 0; 156 157 err_vsi_open: 158 ice_vsi_release(ctrl_vsi); 159 if (vf->ctrl_vsi_idx != ICE_NO_VSI) { 160 pf->vsi[vf->ctrl_vsi_idx] = NULL; 161 vf->ctrl_vsi_idx = ICE_NO_VSI; 162 } 163 return err; 164 } 165 166 /** 167 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type 168 * @vf: pointer to the VF structure 169 * @flow: filter flow type 170 * 171 * Return: 0 on success, and other on error. 
172 */ 173 static int 174 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) 175 { 176 struct ice_vf_fdir *fdir = &vf->fdir; 177 178 if (!fdir->fdir_prof) { 179 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf), 180 ICE_FLTR_PTYPE_MAX, 181 sizeof(*fdir->fdir_prof), 182 GFP_KERNEL); 183 if (!fdir->fdir_prof) 184 return -ENOMEM; 185 } 186 187 if (!fdir->fdir_prof[flow]) { 188 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf), 189 sizeof(**fdir->fdir_prof), 190 GFP_KERNEL); 191 if (!fdir->fdir_prof[flow]) 192 return -ENOMEM; 193 } 194 195 return 0; 196 } 197 198 /** 199 * ice_vc_fdir_free_prof - free profile for this filter flow type 200 * @vf: pointer to the VF structure 201 * @flow: filter flow type 202 */ 203 static void 204 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) 205 { 206 struct ice_vf_fdir *fdir = &vf->fdir; 207 208 if (!fdir->fdir_prof) 209 return; 210 211 if (!fdir->fdir_prof[flow]) 212 return; 213 214 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]); 215 fdir->fdir_prof[flow] = NULL; 216 } 217 218 /** 219 * ice_vc_fdir_free_prof_all - free all the profile for this VF 220 * @vf: pointer to the VF structure 221 */ 222 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf) 223 { 224 struct ice_vf_fdir *fdir = &vf->fdir; 225 enum ice_fltr_ptype flow; 226 227 if (!fdir->fdir_prof) 228 return; 229 230 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++) 231 ice_vc_fdir_free_prof(vf, flow); 232 233 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof); 234 fdir->fdir_prof = NULL; 235 } 236 237 /** 238 * ice_vc_fdir_parse_flow_fld 239 * @proto_hdr: virtual channel protocol filter header 240 * @conf: FDIR configuration for each filter 241 * @fld: field type array 242 * @fld_cnt: field counter 243 * 244 * Parse the virtual channel filter header and store them into field type array 245 * 246 * Return: 0 on success, and other on error. 
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	/* Work on a local copy so the caller's header is left untouched
	 * while fields are consumed below.
	 */
	memcpy(&hdr, proto_hdr, sizeof(hdr));

	/* Walk the map until either every row was tried or all selected
	 * fields in the header copy have been consumed.
	 */
	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			/* A flagged row only applies when the rule's
			 * inset_flag matches (e.g. plain ESP vs NAT-T ESP).
			 */
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			    fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			/* Caller's fld array holds ICE_FLOW_FIELD_IDX_MAX
			 * entries; refuse anything beyond that.
			 */
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			/* Consume the field so the loop terminates once the
			 * selector is empty.
			 */
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store them into
 * flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	/* Collect the ice flow fields selected by every protocol header in
	 * the rule; fld_cnt accumulates across headers.
	 */
	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	/* A rule that selects no fields at all is rejected */
	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	/* Program each collected field into the segment with no explicit
	 * offsets (ICE_FLOW_FLD_OFF_INVAL = use defaults).
	 */
	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	/* Translate the parsed flow type into the segment header bits the
	 * flow engine expects; each case ORs protocol + IP version bits.
	 */
	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	/* All four inner GTP-U flow types share the tunnel-dependent setup */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			/* NOTE(review): the message prints 'flow', not the
			 * offending tunnel type value itself.
			 */
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				flow, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	/* Nothing to remove if no profile was ever allocated for this flow */
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	/* Tunnel profiles are offset by ICE_FLTR_PTYPE_MAX so they never
	 * collide with the non-tunnel ID of the same flow type.
	 */
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	/* Tear down each HW entry before removing the profile itself */
	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	/* The segment was handed over in ice_vc_fdir_write_flow_prof() */
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove profile for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	/* Remove both the non-tunnel (0) and tunnel (1) variant per flow */
	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		/* Identical segment already programmed: report -EEXIST so the
		 * caller can free the duplicate and treat it as success.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		/* A different input set cannot replace one that still has
		 * live filters using it.
		 */
		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	/* Tunnel profile IDs are offset by ICE_FLTR_PTYPE_MAX (see
	 * ice_vc_fdir_rem_prof() which derives the same ID).
	 */
	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	/* One flow entry steers matches on the VF's LAN VSI ... */
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	/* ... and a second one on the VF's control VSI */
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	/* Success: the profile now owns @seg (freed in rem_prof); record
	 * both entries and their VSIs for later teardown.
	 */
	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}

/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	/* @seg ownership: freed here on error or duplicate; otherwise it is
	 * stored by ice_vc_fdir_write_flow_prof() and freed on profile
	 * removal.
	 */
	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		/* Identical profile already programmed: not an error, just
		 * drop the redundant segment.
		 */
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store them into conf
 *
 * Return: 0 on success, and other on error.
728 */ 729 static int 730 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, 731 struct virtchnl_fdir_fltr_conf *conf) 732 { 733 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; 734 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE; 735 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE; 736 struct device *dev = ice_pf_to_dev(vf->pf); 737 struct ice_fdir_fltr *input = &conf->input; 738 int i; 739 740 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) { 741 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n", 742 proto->count, vf->vf_id); 743 return -EINVAL; 744 } 745 746 for (i = 0; i < proto->count; i++) { 747 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; 748 struct ip_esp_hdr *esph; 749 struct ip_auth_hdr *ah; 750 struct sctphdr *sctph; 751 struct ipv6hdr *ip6h; 752 struct udphdr *udph; 753 struct tcphdr *tcph; 754 struct ethhdr *eth; 755 struct iphdr *iph; 756 u8 s_field; 757 u8 *rawh; 758 759 switch (hdr->type) { 760 case VIRTCHNL_PROTO_HDR_ETH: 761 eth = (struct ethhdr *)hdr->buffer; 762 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2; 763 764 if (hdr->field_selector) 765 input->ext_data.ether_type = eth->h_proto; 766 break; 767 case VIRTCHNL_PROTO_HDR_IPV4: 768 iph = (struct iphdr *)hdr->buffer; 769 l3 = VIRTCHNL_PROTO_HDR_IPV4; 770 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; 771 772 if (hdr->field_selector) { 773 input->ip.v4.src_ip = iph->saddr; 774 input->ip.v4.dst_ip = iph->daddr; 775 input->ip.v4.tos = iph->tos; 776 input->ip.v4.proto = iph->protocol; 777 } 778 break; 779 case VIRTCHNL_PROTO_HDR_IPV6: 780 ip6h = (struct ipv6hdr *)hdr->buffer; 781 l3 = VIRTCHNL_PROTO_HDR_IPV6; 782 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; 783 784 if (hdr->field_selector) { 785 memcpy(input->ip.v6.src_ip, 786 ip6h->saddr.in6_u.u6_addr8, 787 sizeof(ip6h->saddr)); 788 memcpy(input->ip.v6.dst_ip, 789 ip6h->daddr.in6_u.u6_addr8, 790 sizeof(ip6h->daddr)); 791 input->ip.v6.tc = ((u8)(ip6h->priority) << 
4) | 792 (ip6h->flow_lbl[0] >> 4); 793 input->ip.v6.proto = ip6h->nexthdr; 794 } 795 break; 796 case VIRTCHNL_PROTO_HDR_TCP: 797 tcph = (struct tcphdr *)hdr->buffer; 798 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 799 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP; 800 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 801 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP; 802 803 if (hdr->field_selector) { 804 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { 805 input->ip.v4.src_port = tcph->source; 806 input->ip.v4.dst_port = tcph->dest; 807 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { 808 input->ip.v6.src_port = tcph->source; 809 input->ip.v6.dst_port = tcph->dest; 810 } 811 } 812 break; 813 case VIRTCHNL_PROTO_HDR_UDP: 814 udph = (struct udphdr *)hdr->buffer; 815 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 816 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP; 817 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 818 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP; 819 820 if (hdr->field_selector) { 821 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { 822 input->ip.v4.src_port = udph->source; 823 input->ip.v4.dst_port = udph->dest; 824 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { 825 input->ip.v6.src_port = udph->source; 826 input->ip.v6.dst_port = udph->dest; 827 } 828 } 829 break; 830 case VIRTCHNL_PROTO_HDR_SCTP: 831 sctph = (struct sctphdr *)hdr->buffer; 832 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 833 input->flow_type = 834 ICE_FLTR_PTYPE_NONF_IPV4_SCTP; 835 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 836 input->flow_type = 837 ICE_FLTR_PTYPE_NONF_IPV6_SCTP; 838 839 if (hdr->field_selector) { 840 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) { 841 input->ip.v4.src_port = sctph->source; 842 input->ip.v4.dst_port = sctph->dest; 843 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) { 844 input->ip.v6.src_port = sctph->source; 845 input->ip.v6.dst_port = sctph->dest; 846 } 847 } 848 break; 849 case VIRTCHNL_PROTO_HDR_L2TPV3: 850 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 851 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3; 852 else if (l3 == 
VIRTCHNL_PROTO_HDR_IPV6) 853 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3; 854 855 if (hdr->field_selector) 856 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer); 857 break; 858 case VIRTCHNL_PROTO_HDR_ESP: 859 esph = (struct ip_esp_hdr *)hdr->buffer; 860 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && 861 l4 == VIRTCHNL_PROTO_HDR_UDP) 862 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP; 863 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && 864 l4 == VIRTCHNL_PROTO_HDR_UDP) 865 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP; 866 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && 867 l4 == VIRTCHNL_PROTO_HDR_NONE) 868 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP; 869 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && 870 l4 == VIRTCHNL_PROTO_HDR_NONE) 871 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP; 872 873 if (l4 == VIRTCHNL_PROTO_HDR_UDP) 874 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP; 875 else 876 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC; 877 878 if (hdr->field_selector) { 879 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 880 input->ip.v4.sec_parm_idx = esph->spi; 881 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 882 input->ip.v6.sec_parm_idx = esph->spi; 883 } 884 break; 885 case VIRTCHNL_PROTO_HDR_AH: 886 ah = (struct ip_auth_hdr *)hdr->buffer; 887 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 888 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH; 889 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 890 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH; 891 892 if (hdr->field_selector) { 893 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 894 input->ip.v4.sec_parm_idx = ah->spi; 895 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 896 input->ip.v6.sec_parm_idx = ah->spi; 897 } 898 break; 899 case VIRTCHNL_PROTO_HDR_PFCP: 900 rawh = (u8 *)hdr->buffer; 901 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK; 902 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0) 903 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE; 904 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1) 905 input->flow_type = 
ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION; 906 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0) 907 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE; 908 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1) 909 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION; 910 911 if (hdr->field_selector) { 912 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) 913 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR); 914 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) 915 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR); 916 } 917 break; 918 case VIRTCHNL_PROTO_HDR_GTPU_IP: 919 rawh = (u8 *)hdr->buffer; 920 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER; 921 922 if (hdr->field_selector) 923 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]); 924 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU; 925 break; 926 case VIRTCHNL_PROTO_HDR_GTPU_EH: 927 rawh = (u8 *)hdr->buffer; 928 929 if (hdr->field_selector) 930 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK; 931 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH; 932 break; 933 default: 934 dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n", 935 hdr->type, vf->vf_id); 936 return -EINVAL; 937 } 938 } 939 940 return 0; 941 } 942 943 /** 944 * ice_vc_fdir_parse_action 945 * @vf: pointer to the VF info 946 * @fltr: virtual channel add cmd buffer 947 * @conf: FDIR configuration for each filter 948 * 949 * Parse the virtual channel filter's action and store them into conf 950 * 951 * Return: 0 on success, and other on error. 
952 */ 953 static int 954 ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, 955 struct virtchnl_fdir_fltr_conf *conf) 956 { 957 struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set; 958 struct device *dev = ice_pf_to_dev(vf->pf); 959 struct ice_fdir_fltr *input = &conf->input; 960 u32 dest_num = 0; 961 u32 mark_num = 0; 962 int i; 963 964 if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) { 965 dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n", 966 as->count, vf->vf_id); 967 return -EINVAL; 968 } 969 970 for (i = 0; i < as->count; i++) { 971 struct virtchnl_filter_action *action = &as->actions[i]; 972 973 switch (action->type) { 974 case VIRTCHNL_ACTION_PASSTHRU: 975 dest_num++; 976 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER; 977 break; 978 case VIRTCHNL_ACTION_DROP: 979 dest_num++; 980 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; 981 break; 982 case VIRTCHNL_ACTION_QUEUE: 983 dest_num++; 984 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; 985 input->q_index = action->act_conf.queue.index; 986 break; 987 case VIRTCHNL_ACTION_Q_REGION: 988 dest_num++; 989 input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP; 990 input->q_index = action->act_conf.queue.index; 991 input->q_region = action->act_conf.queue.region; 992 break; 993 case VIRTCHNL_ACTION_MARK: 994 mark_num++; 995 input->fltr_id = action->act_conf.mark_id; 996 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE; 997 break; 998 default: 999 dev_dbg(dev, "Invalid action type:0x%x for VF %d\n", 1000 action->type, vf->vf_id); 1001 return -EINVAL; 1002 } 1003 } 1004 1005 if (dest_num == 0 || dest_num >= 2) { 1006 dev_dbg(dev, "Invalid destination action for VF %d\n", 1007 vf->vf_id); 1008 return -EINVAL; 1009 } 1010 1011 if (mark_num >= 2) { 1012 dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id); 1013 return -EINVAL; 1014 } 1015 1016 return 0; 1017 } 1018 1019 /** 1020 * ice_vc_validate_fdir_fltr - validate the virtual channel 
filter 1021 * @vf: pointer to the VF info 1022 * @fltr: virtual channel add cmd buffer 1023 * @conf: FDIR configuration for each filter 1024 * 1025 * Return: 0 on success, and other on error. 1026 */ 1027 static int 1028 ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, 1029 struct virtchnl_fdir_fltr_conf *conf) 1030 { 1031 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; 1032 int ret; 1033 1034 if (!ice_vc_validate_pattern(vf, proto)) 1035 return -EINVAL; 1036 1037 ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); 1038 if (ret) 1039 return ret; 1040 1041 return ice_vc_fdir_parse_action(vf, fltr, conf); 1042 } 1043 1044 /** 1045 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value 1046 * @conf_a: FDIR configuration for filter a 1047 * @conf_b: FDIR configuration for filter b 1048 * 1049 * Return: 0 on success, and other on error. 1050 */ 1051 static bool 1052 ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, 1053 struct virtchnl_fdir_fltr_conf *conf_b) 1054 { 1055 struct ice_fdir_fltr *a = &conf_a->input; 1056 struct ice_fdir_fltr *b = &conf_b->input; 1057 1058 if (conf_a->ttype != conf_b->ttype) 1059 return false; 1060 if (a->flow_type != b->flow_type) 1061 return false; 1062 if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) 1063 return false; 1064 if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) 1065 return false; 1066 if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data))) 1067 return false; 1068 if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask))) 1069 return false; 1070 if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data))) 1071 return false; 1072 if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask))) 1073 return false; 1074 if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) 1075 return false; 1076 if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) 1077 return false; 1078 1079 return true; 1080 } 1081 1082 /** 1083 * 
ice_vc_fdir_is_dup_fltr 1084 * @vf: pointer to the VF info 1085 * @conf: FDIR configuration for each filter 1086 * 1087 * Check if there is duplicated rule with same conf value 1088 * 1089 * Return: 0 true success, and false on error. 1090 */ 1091 static bool 1092 ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf) 1093 { 1094 struct ice_fdir_fltr *desc; 1095 bool ret; 1096 1097 list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) { 1098 struct virtchnl_fdir_fltr_conf *node = 1099 to_fltr_conf_from_desc(desc); 1100 1101 ret = ice_vc_fdir_comp_rules(node, conf); 1102 if (ret) 1103 return true; 1104 } 1105 1106 return false; 1107 } 1108 1109 /** 1110 * ice_vc_fdir_insert_entry 1111 * @vf: pointer to the VF info 1112 * @conf: FDIR configuration for each filter 1113 * @id: pointer to ID value allocated by driver 1114 * 1115 * Insert FDIR conf entry into list and allocate ID for this filter 1116 * 1117 * Return: 0 true success, and other on error. 1118 */ 1119 static int 1120 ice_vc_fdir_insert_entry(struct ice_vf *vf, 1121 struct virtchnl_fdir_fltr_conf *conf, u32 *id) 1122 { 1123 struct ice_fdir_fltr *input = &conf->input; 1124 int i; 1125 1126 /* alloc ID corresponding with conf */ 1127 i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0, 1128 ICE_FDIR_MAX_FLTRS, GFP_KERNEL); 1129 if (i < 0) 1130 return -EINVAL; 1131 *id = i; 1132 1133 list_add(&input->fltr_node, &vf->fdir.fdir_rule_list); 1134 return 0; 1135 } 1136 1137 /** 1138 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value 1139 * @vf: pointer to the VF info 1140 * @conf: FDIR configuration for each filter 1141 * @id: filter rule's ID 1142 */ 1143 static void 1144 ice_vc_fdir_remove_entry(struct ice_vf *vf, 1145 struct virtchnl_fdir_fltr_conf *conf, u32 id) 1146 { 1147 struct ice_fdir_fltr *input = &conf->input; 1148 1149 idr_remove(&vf->fdir.fdir_rule_idr, id); 1150 list_del(&input->fltr_node); 1151 } 1152 1153 /** 1154 * ice_vc_fdir_lookup_entry - lookup FDIR 
 * conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: NULL on error, and other on success.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	/* Safe iteration: each node is unlinked and freed as we go */
	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	/* Request SW-visible completion reporting for this program request */
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	/* Programming is done through the VF's control VSI */
	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	/* Build the programming descriptor and a matching training packet,
	 * then hand both to the control VSI for HW programming.
	 */
	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 *
 * Fires when HW did not deliver a programming completion in time; moves
 * the pending request from ctx_irq to ctx_done with TIMEOUT status and
 * schedules the service task to flush it.
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;

	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	/* Hand the request over to ctx_done for deferred processing */
	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	/* Move the request to ctx_done with the HW writeback descriptor
	 * attached; the service task finishes the virtchnl response.
	 */
	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	/* The timeout timer should still be pending; completion beat it */
	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	/* NOTE(review): fd_size (the FD_SIZE register) is decoded below with
	 * the VSIQF_FD_CNT_* masks — confirm against the register spec that
	 * the FD_SIZE field layout really matches FD_CNT.
	 */
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}

/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	/* Descriptor Done must be set before any other field is valid */
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	/* The writeback's add/del program ID must match the pending opcode */
	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc show add, but ctx not",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: Desc show del, but ctx not",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}

/**
 * ice_vc_add_fdir_fltr_post - post process for add filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director add command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	/* Revert the add: unlink the entry and free the conf */
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post - post process for del filter
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post process for flow director del command. If success, then do post process
 * and send back success msg by virtchnl. Otherwise, do context reversion and
 * send back failure msg by virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	/* NOTE(review): when resp allocation fails with success == true,
	 * conf is freed here without ice_vc_fdir_remove_entry() having
	 * unlinked it — verify the entry cannot still be reachable from
	 * the rule list/idr on this path.
	 */
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_flush_fdir_ctx - flush pending FDIR completions
 * @pf: pointer to the PF structure
 *
 * Flush all the pending event on ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		/* READY means neither IRQ nor timer filled the ctx in */
		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	/* Only one programming request may be in flight per VF */
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	/* Arm a ~10ms timeout in case the HW completion never arrives */
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	/* Stop the timeout timer and invalidate the in-flight context */
	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}

/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	/* -EEXIST means the control VSI is already running, which is fine */
	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	/* validate_only: VF only wants the rule checked, not installed */
	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_free_conf;
	}

	/* The virtchnl reply is sent later from ice_flush_fdir_ctx() once
	 * the HW programming completion (or timeout) arrives.
	 */
	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_rem_entry;
	}

exit:
	kfree(stat);
	return ret;

err_rem_entry:
	ice_vc_fdir_clear_irq_ctx(vf);
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	/* Reply is deferred to ice_flush_fdir_ctx() on completion/timeout */
	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}