// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#include "ice_vf_lib_private.h"

#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	       (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)

enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};

struct virtchnl_fdir_fltr_conf {
	struct ice_fdir_fltr input;
	enum ice_fdir_tunnel_type ttype;
	u64 inset_flag;
	u32 flow_id;
};

struct virtchnl_fdir_inset_map {
	enum virtchnl_proto_hdr_field field;
	enum ice_flow_field fld;
	u64 flag;
	u64 mask;
};

static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
	{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
	{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
		FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
		FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
	{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
	{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
	{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
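
/* Note on the two VIRTCHNL_PROTO_HDR_ESP_SPI entries above: the flag/mask
 * pair lets a single virtchnl field map to different hardware flow fields
 * depending on the filter's encapsulation. ice_vc_fdir_parse_pattern()
 * records FDIR_INSET_FLAG_ESP_UDP in conf->inset_flag when the SPI follows
 * a UDP header (NAT-T ESP) and FDIR_INSET_FLAG_ESP_IPSEC otherwise;
 * ice_vc_fdir_parse_flow_fld() then skips any map entry whose flag does not
 * match conf->inset_flag under the entry's mask, so the SPI lands in either
 * ICE_FLOW_FIELD_IDX_ESP_SPI or ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI.
 */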
/**
 * ice_vc_fdir_param_check
 * @vf: pointer to the VF structure
 * @vsi_id: VF relative VSI ID
 *
 * Check for a valid VSI ID, PF state and VF state
 *
 * Return: 0 on success, and -EINVAL on error.
 */
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
	struct ice_pf *pf = vf->pf;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EINVAL;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
		return -EINVAL;

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
		return -EINVAL;

	if (vsi_id != vf->lan_vsi_num)
		return -EINVAL;

	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
		return -EINVAL;

	if (!ice_get_vf_vsi(vf))
		return -EINVAL;

	return 0;
}

/**
 * ice_vf_start_ctrl_vsi
 * @vf: pointer to the VF structure
 *
 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *ctrl_vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		return -EEXIST;

	ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
	if (!ctrl_vsi) {
		dev_dbg(dev, "Could not setup control VSI for VF %d\n",
			vf->vf_id);
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "Could not open control VSI for VF %d\n",
			vf->vf_id);
		goto err_vsi_open;
	}

	return 0;

err_vsi_open:
	ice_vsi_release(ctrl_vsi);
	if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[vf->ctrl_vsi_idx] = NULL;
		vf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}
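
/* Callers treat -EEXIST from ice_vf_start_ctrl_vsi() as success: the
 * control VSI is allocated once, on the first VIRTCHNL_OP_ADD_FDIR_FILTER
 * request, and reused for every later add/del. See ice_vc_add_fdir_fltr(),
 * which only fails when the return value is nonzero and not -EEXIST.
 */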
/**
 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof) {
		fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
					       ICE_FLTR_PTYPE_MAX,
					       sizeof(*fdir->fdir_prof),
					       GFP_KERNEL);
		if (!fdir->fdir_prof)
			return -ENOMEM;
	}

	if (!fdir->fdir_prof[flow]) {
		fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
						     sizeof(**fdir->fdir_prof),
						     GFP_KERNEL);
		if (!fdir->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vc_fdir_free_prof - free profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 */
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	if (!fdir->fdir_prof)
		return;

	if (!fdir->fdir_prof[flow])
		return;

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
	fdir->fdir_prof[flow] = NULL;
}

/**
 * ice_vc_fdir_free_prof_all - free all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	enum ice_fltr_ptype flow;

	if (!fdir->fdir_prof)
		return;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_vc_fdir_free_prof(vf, flow);

	devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
	fdir->fdir_prof = NULL;
}
/**
 * ice_vc_fdir_parse_flow_fld
 * @proto_hdr: virtual channel protocol filter header
 * @conf: FDIR configuration for each filter
 * @fld: field type array
 * @fld_cnt: field counter
 *
 * Parse the virtual channel filter header and store the fields into the
 * field type array
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
			   struct virtchnl_fdir_fltr_conf *conf,
			   enum ice_flow_field *fld, int *fld_cnt)
{
	struct virtchnl_proto_hdr hdr;
	u32 i;

	memcpy(&hdr, proto_hdr, sizeof(hdr));

	for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
	     VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
		if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
			if (fdir_inset_map[i].mask &&
			    ((fdir_inset_map[i].mask & conf->inset_flag) !=
			     fdir_inset_map[i].flag))
				continue;

			fld[*fld_cnt] = fdir_inset_map[i].fld;
			*fld_cnt += 1;
			if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
				return -EINVAL;
			VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
						     fdir_inset_map[i].field);
		}

	return 0;
}

/**
 * ice_vc_fdir_set_flow_fld
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Parse the virtual channel add msg buffer's field vector and store it in the
 * flow's packet segment field
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
	enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct virtchnl_proto_hdrs *proto;
	int fld_cnt = 0;
	int i;

	proto = &rule->proto_hdrs;
	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		int ret;

		ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
		if (ret)
			return ret;
	}

	if (fld_cnt == 0) {
		dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < fld_cnt; i++)
		ice_flow_set_fld(seg, fld[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	return 0;
}

/**
 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @seg: array of one or more packet segments that describe the flow
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf,
			 struct ice_flow_seg_info *seg)
{
	enum ice_fltr_ptype flow = conf->input.flow_type;
	enum ice_fdir_tunnel_type ttype = conf->ttype;
	struct device *dev = ice_pf_to_dev(vf->pf);

	switch (flow) {
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
			ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
					  ICE_FLOW_SEG_HDR_GTPU_IP |
					  ICE_FLOW_SEG_HDR_IPV4 |
					  ICE_FLOW_SEG_HDR_IPV_OTHER);
		} else {
			dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
				ttype, vf->vf_id);
			return -EINVAL;
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_AH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		dev_dbg(dev, "Invalid flow type 0x%x for VF %d\n",
			flow, vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
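
/* The 64-bit profile IDs used below encode the VSI number in the upper word
 * and (flow + tun_offs) in the lower word, per ICE_FLOW_PROF_FD(). Passing
 * tun_offs = ICE_FLTR_PTYPE_MAX for tunnel filters shifts the flow type out
 * of the non-tunnel range, so the tunnel and non-tunnel profiles for the
 * same flow type on the same VSI get distinct IDs and can coexist in the
 * ICE_BLK_FD block.
 */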
/**
 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 */
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_fd_hw_prof *vf_prof;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_hw *hw;
	u64 prof_id;
	int i;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
		return;

	vf_prof = fdir->fdir_prof[flow];

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
		return;
	}

	if (!fdir->prof_entry_cnt[flow][tun])
		return;

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
				   flow, tun ? ICE_FLTR_PTYPE_MAX : 0);

	for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
		if (vf_prof->entry_h[i][tun]) {
			u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);

			ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   vf_prof->entry_h[i][tun]);
			vf_prof->entry_h[i][tun] = 0;
		}

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	devm_kfree(dev, vf_prof->fdir_seg[tun]);
	vf_prof->fdir_seg[tun] = NULL;

	for (i = 0; i < vf_prof->cnt; i++)
		vf_prof->vsi_h[i] = 0;

	fdir->prof_entry_cnt[flow][tun] = 0;
}

/**
 * ice_vc_fdir_rem_prof_all - remove all profiles for this VF
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
	enum ice_fltr_ptype flow;

	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
		ice_vc_fdir_rem_prof(vf, flow, 0);
		ice_vc_fdir_rem_prof(vf, flow, 1);
	}
}

/**
 * ice_vc_fdir_has_prof_conflict
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 *
 * Check if @conf has a conflicting profile with existing profiles
 *
 * Return: true if a conflict is found, false otherwise.
 */
static bool
ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
			      struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *existing_conf;
		enum ice_fltr_ptype flow_type_a, flow_type_b;
		struct ice_fdir_fltr *a, *b;

		existing_conf = to_fltr_conf_from_desc(desc);
		a = &existing_conf->input;
		b = &conf->input;
		flow_type_a = a->flow_type;
		flow_type_b = b->flow_type;

		/* No need to compare two rules with different tunnel types or
		 * with the same protocol type.
		 */
		if (existing_conf->ttype != conf->ttype ||
		    flow_type_a == flow_type_b)
			continue;

		switch (flow_type_a) {
		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
				return true;
			break;
		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
				return true;
			break;
		default:
			break;
		}
	}

	return false;
}
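
/* Rationale for the conflict pairs above (as suggested by the profile
 * layout): an IPV4/IPV6 "OTHER" profile matches packets of any L4 protocol,
 * so it would overlap with an L4-specific (UDP/TCP/SCTP) profile of the
 * same tunnel type in hardware; mixing the two on one VF could steer the
 * same packets through either profile, so such combinations are rejected
 * up front.
 */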
/**
 * ice_vc_fdir_write_flow_prof
 * @vf: pointer to the VF structure
 * @flow: filter flow type
 * @seg: array of one or more packet segments that describe the flow
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Write the flow's profile config and packet segment into the hardware
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
			    struct ice_flow_seg_info *seg, int tun)
{
	struct ice_vf_fdir *fdir = &vf->fdir;
	struct ice_vsi *vf_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *vf_prof;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	u64 prof_id;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi)
		return -EINVAL;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi)
		return -EINVAL;

	vf_prof = fdir->fdir_prof[flow];
	old_seg = vf_prof->fdir_seg[tun];
	if (old_seg) {
		if (!memcmp(old_seg, seg, sizeof(*seg))) {
			dev_dbg(dev, "Duplicated profile for VF %d!\n",
				vf->vf_id);
			return -EEXIST;
		}

		if (fdir->fdir_fltr_cnt[flow][tun]) {
			ret = -EINVAL;
			dev_dbg(dev, "Input set conflicts for VF %d\n",
				vf->vf_id);
			goto err_exit;
		}

		/* remove previously allocated profile */
		ice_vc_fdir_rem_prof(vf, flow, tun);
	}

	prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
				   tun ? ICE_FLTR_PTYPE_MAX : 0);

	ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				tun + 1, &prof);
	if (ret) {
		dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
			flow, vf->vf_id);
		goto err_exit;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (ret) {
		dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_prof;
	}

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (ret) {
		dev_dbg(dev,
			"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
			flow, vf->vf_id);
		goto err_entry_1;
	}

	vf_prof->fdir_seg[tun] = seg;
	vf_prof->cnt = 0;
	fdir->prof_entry_cnt[flow][tun] = 0;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
	vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
	vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
	vf_prof->cnt++;
	fdir->prof_entry_cnt[flow][tun]++;

	return 0;

err_entry_1:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
	return ret;
}
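
/* Ownership note: on success ice_vc_fdir_write_flow_prof() stores @seg in
 * vf_prof->fdir_seg[tun], and the segment is later freed by
 * ice_vc_fdir_rem_prof(). This is why ice_vc_fdir_config_input_set() below
 * frees the segment itself only on failure, or when a duplicate profile
 * (-EEXIST) means the new segment was never stored.
 */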
/**
 * ice_vc_fdir_config_input_set
 * @vf: pointer to the VF structure
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
 *
 * Config the input set type and value for the virtual channel add msg buffer
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			     struct virtchnl_fdir_fltr_conf *conf, int tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_flow_seg_info *seg;
	enum ice_fltr_ptype flow;
	int ret;

	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
	if (ret) {
		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
			vf->vf_id);
		return ret;
	}

	flow = input->flow_type;
	ret = ice_vc_fdir_alloc_prof(vf, flow);
	if (ret) {
		dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
		return ret;
	}

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
	if (ret) {
		dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
	if (ret == -EEXIST) {
		devm_kfree(dev, seg);
	} else if (ret) {
		dev_dbg(dev, "Write flow profile for VF %d failed\n",
			vf->vf_id);
		goto err_exit;
	}

	return 0;

err_exit:
	devm_kfree(dev, seg);
	return ret;
}

/**
 * ice_vc_fdir_parse_pattern
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's pattern and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
	enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
			proto->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < proto->count; i++) {
		struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
		struct ip_esp_hdr *esph;
		struct ip_auth_hdr *ah;
		struct sctphdr *sctph;
		struct ipv6hdr *ip6h;
		struct udphdr *udph;
		struct tcphdr *tcph;
		struct ethhdr *eth;
		struct iphdr *iph;
		u8 s_field;
		u8 *rawh;

		switch (hdr->type) {
		case VIRTCHNL_PROTO_HDR_ETH:
			eth = (struct ethhdr *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;

			if (hdr->field_selector)
				input->ext_data.ether_type = eth->h_proto;
			break;
		case VIRTCHNL_PROTO_HDR_IPV4:
			iph = (struct iphdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV4;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;

			if (hdr->field_selector) {
				input->ip.v4.src_ip = iph->saddr;
				input->ip.v4.dst_ip = iph->daddr;
				input->ip.v4.tos = iph->tos;
				input->ip.v4.proto = iph->protocol;
			}
			break;
		case VIRTCHNL_PROTO_HDR_IPV6:
			ip6h = (struct ipv6hdr *)hdr->buffer;
			l3 = VIRTCHNL_PROTO_HDR_IPV6;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;

			if (hdr->field_selector) {
				memcpy(input->ip.v6.src_ip,
				       ip6h->saddr.in6_u.u6_addr8,
				       sizeof(ip6h->saddr));
				memcpy(input->ip.v6.dst_ip,
				       ip6h->daddr.in6_u.u6_addr8,
				       sizeof(ip6h->daddr));
				input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
						  (ip6h->flow_lbl[0] >> 4);
				input->ip.v6.proto = ip6h->nexthdr;
			}
			break;
		case VIRTCHNL_PROTO_HDR_TCP:
			tcph = (struct tcphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = tcph->source;
					input->ip.v4.dst_port = tcph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = tcph->source;
					input->ip.v6.dst_port = tcph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_UDP:
			udph = (struct udphdr *)hdr->buffer;
			/* track L4 so a following ESP header is classified as
			 * NAT-T; without this assignment the l4 checks in the
			 * ESP case below could never match.
			 */
			l4 = VIRTCHNL_PROTO_HDR_UDP;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = udph->source;
					input->ip.v4.dst_port = udph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = udph->source;
					input->ip.v6.dst_port = udph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_SCTP:
			sctph = (struct sctphdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type =
					ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
					input->ip.v4.src_port = sctph->source;
					input->ip.v4.dst_port = sctph->dest;
				} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
					input->ip.v6.src_port = sctph->source;
					input->ip.v6.dst_port = sctph->dest;
				}
			}
			break;
		case VIRTCHNL_PROTO_HDR_L2TPV3:
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;

			if (hdr->field_selector)
				input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
			break;
		case VIRTCHNL_PROTO_HDR_ESP:
			esph = (struct ip_esp_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
			    l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_UDP)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
				 l4 == VIRTCHNL_PROTO_HDR_NONE)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			if (l4 == VIRTCHNL_PROTO_HDR_UDP)
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
			else
				conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = esph->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = esph->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_AH:
			ah = (struct ip_auth_hdr *)hdr->buffer;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.sec_parm_idx = ah->spi;
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.sec_parm_idx = ah->spi;
			}
			break;
		case VIRTCHNL_PROTO_HDR_PFCP:
			rawh = (u8 *)hdr->buffer;
			s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
			if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
			else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
				input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;

			if (hdr->field_selector) {
				if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
					input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
				else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
					input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
			}
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_IP:
			rawh = (u8 *)hdr->buffer;
			input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

			if (hdr->field_selector)
				input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case VIRTCHNL_PROTO_HDR_GTPU_EH:
			rawh = (u8 *)hdr->buffer;

			if (hdr->field_selector)
				input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
			conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			break;
		default:
			dev_dbg(dev, "Invalid header type 0x%x for VF %d\n",
				hdr->type, vf->vf_id);
			return -EINVAL;
		}
	}

	return 0;
}
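
/* Example of how the parser above composes a flow type: a virtchnl pattern
 * of ETH / IPV4 / UDP first sets ICE_FLTR_PTYPE_NONF_IPV4_OTHER at the IPV4
 * header, then the UDP case refines it to ICE_FLTR_PTYPE_NONF_IPV4_UDP,
 * copying the ports only when the header's field_selector is set. Later
 * headers may refine the type again (e.g. ESP after UDP yields NAT-T ESP).
 */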
/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store it into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;
	u32 mark_num = 0;
	int i;

	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action count:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
	int ret;

	if (!ice_vc_validate_pattern(vf, proto))
		return -EINVAL;

	ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (ret)
		return ret;

	return ice_vc_fdir_parse_action(vf, fltr, conf);
}
/**
 * ice_vc_fdir_comp_rules - check whether two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules match, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}

/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if a duplicated rule with the same conf value exists
 *
 * Return: true if a duplicate is found, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}

/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}

/**
 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: filter rule's ID
 */
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
	struct ice_fdir_fltr *input = &conf->input;

	idr_remove(&vf->fdir.fdir_rule_idr, id);
	list_del(&input->fltr_node);
}
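
/* The @id allocated by ice_vc_fdir_insert_entry() doubles as the flow_id
 * reported back to the VF in struct virtchnl_fdir_add, and it is the handle
 * the VF must pass in a later VIRTCHNL_OP_DEL_FDIR_FILTER request; the IDR
 * keeps the id -> conf mapping used by ice_vc_fdir_lookup_entry().
 */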
/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: pointer to the filter config on success, NULL otherwise.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}

/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entries
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}

/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rule
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;

	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}

/**
 * ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
 * @t: pointer to timer_list
 */
static void ice_vf_fdir_timer(struct timer_list *t)
{
	struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct ice_vf *vf;
	struct ice_pf *pf;

	fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
	vf = container_of(fdir, struct ice_vf, fdir);
	ctx_done = &fdir->ctx_done;
	pf = vf->pf;
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}
/**
 * ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
 * @ctrl_vsi: pointer to a VF's CTRL VSI
 * @rx_desc: pointer to FDIR Rx queue descriptor
 */
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
			union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_pf *pf = ctrl_vsi->back;
	struct ice_vf *vf = ctrl_vsi->vf;
	struct ice_vf_fdir_ctx *ctx_done;
	struct ice_vf_fdir_ctx *ctx_irq;
	struct ice_vf_fdir *fdir;
	unsigned long flags;
	struct device *dev;
	int ret;

	if (WARN_ON(!vf))
		return;

	fdir = &vf->fdir;
	ctx_done = &fdir->ctx_done;
	ctx_irq = &fdir->ctx_irq;
	dev = ice_pf_to_dev(pf);
	spin_lock_irqsave(&fdir->ctx_lock, flags);
	if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&fdir->ctx_lock, flags);
		WARN_ON_ONCE(1);
		return;
	}

	ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;

	ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
	ctx_done->conf = ctx_irq->conf;
	ctx_done->stat = ICE_FDIR_CTX_IRQ;
	ctx_done->v_opcode = ctx_irq->v_opcode;
	memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
	spin_unlock_irqrestore(&fdir->ctx_lock, flags);

	ret = del_timer(&ctx_irq->rx_tmr);
	if (!ret)
		dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);

	set_bit(ICE_FD_VF_FLUSH_CTX, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_vf_fdir_dump_info - dump FDIR information for diagnosis
 * @vf: pointer to the VF info
 */
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
	struct ice_vsi *vf_vsi;
	u32 fd_size, fd_cnt;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u16 vsi_num;

	pf = vf->pf;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);
	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
		return;
	}

	vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);

	fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
	fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
	dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
		vf->vf_id,
		(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
		(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
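
/* The VSIQF_FD_SIZE/VSIQF_FD_CNT dump above reads the per-VSI flow director
 * space: "guar" is the guaranteed filter allocation (the GCNT fields) and
 * "be" is the best-effort/shared allocation (the BCNT fields), printed
 * first as the space allocated and then as the space already consumed.
 */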
/**
 * ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
 * @vf: pointer to the VF info
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
		      enum virtchnl_fdir_prgm_status *status)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	u32 stat_err, error, prog_id;
	int ret;

	stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
	if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
	    ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
		ret = -EINVAL;
		goto err_exit;
	}

	prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
		  ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
	    ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor indicates add, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
	    ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
		dev_err(dev, "VF %d: descriptor indicates del, but context does not\n",
			vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
		if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
			dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		} else {
			dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry\n",
				vf->vf_id);
			*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		}
		ret = -EINVAL;
		goto err_exit;
	}

	error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
		ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
	if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
		dev_err(dev, "VF %d: Profile matching error\n", vf->vf_id);
		*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		ret = -EINVAL;
		goto err_exit;
	}

	*status = VIRTCHNL_FDIR_SUCCESS;

	return 0;

err_exit:
	ice_vf_fdir_dump_info(vf);
	return ret;
}
/**
 * ice_vc_add_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director add command: on success, update the filter
 * bookkeeping and send a success msg back over virtchnl; otherwise, revert
 * the context and send a failure msg back over virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_add *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	resp->flow_id = conf->flow_id;
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}

/**
 * ice_vc_del_fdir_fltr_post
 * @vf: pointer to the VF structure
 * @ctx: FDIR context info for post processing
 * @status: virtchnl FDIR program status
 * @success: true implies success, false implies failure
 *
 * Post-process the flow director del command: on success, update the filter
 * bookkeeping and send a success msg back over virtchnl; otherwise, revert
 * the context and send a failure msg back over virtchnl.
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
			  enum virtchnl_fdir_prgm_status status,
			  bool success)
{
	struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum virtchnl_status_code v_ret;
	struct virtchnl_fdir_del *resp;
	int ret, len, is_tun;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	len = sizeof(*resp);
	resp = kzalloc(len, GFP_KERNEL);
	if (!resp) {
		len = 0;
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
		goto err_exit;
	}

	if (!success)
		goto err_exit;

	is_tun = 0;
	resp->status = status;
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);

	dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
		vf->vf_id, conf->flow_id,
		(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
		"add" : "del");
	devm_kfree(dev, conf);
	return ret;

err_exit:
	if (resp)
		resp->status = status;
	if (success)
		devm_kfree(dev, conf);

	ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
				    (u8 *)resp, len);
	kfree(resp);
	return ret;
}
/**
 * ice_flush_fdir_ctx
 * @pf: pointer to the PF structure
 *
 * Flush all pending events on the ctx_done list and process them.
 */
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		struct device *dev = ice_pf_to_dev(pf);
		enum virtchnl_fdir_prgm_status status;
		struct ice_vf_fdir_ctx *ctx;
		unsigned long flags;
		int ret;

		if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		if (vf->ctrl_vsi_idx == ICE_NO_VSI)
			continue;

		ctx = &vf->fdir.ctx_done;
		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
			spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

		WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
		if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
			status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
			dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
				vf->vf_id);
			goto err_exit;
		}

		ret = ice_vf_verify_rx_desc(vf, ctx, &status);
		if (ret)
			goto err_exit;

		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		continue;
err_exit:
		if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
			ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
		else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
			ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
		else
			dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);

		spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
	}
	mutex_unlock(&pf->vfs.table_lock);
}
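
/* ice_flush_fdir_ctx() runs from the PF service task: both completion paths
 * set ICE_FD_VF_FLUSH_CTX in pf->state and call ice_service_task_schedule(),
 * so the virtchnl reply to the VF is always sent from process context rather
 * than from the timer or Rx interrupt path.
 */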
/**
 * ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
 * @vf: pointer to the VF structure
 * @conf: FDIR configuration for each filter
 * @v_opcode: virtual channel operation code
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
			enum virtchnl_ops v_opcode)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vf_fdir_ctx *ctx;
	unsigned long flags;

	ctx = &vf->fdir.ctx_irq;
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
	    (vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
		dev_dbg(dev, "VF %d: Last request is still in progress\n",
			vf->vf_id);
		return -EBUSY;
	}
	ctx->flags |= ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);

	ctx->conf = conf;
	ctx->v_opcode = v_opcode;
	ctx->stat = ICE_FDIR_CTX_READY;
	timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);

	mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));

	return 0;
}

/**
 * ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
 * @vf: pointer to the VF structure
 */
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
	struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
	unsigned long flags;

	del_timer(&ctx->rx_tmr);
	spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
	ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
	spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
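
/* Add/del requests are therefore asynchronous: ice_vc_add_fdir_fltr() below
 * validates the message, programs the rule through the control VSI and
 * returns without replying; the reply is sent later by
 * ice_vc_add_fdir_fltr_post() (or the del variant) once the programming
 * status descriptor arrives or the 10 ms timer armed in
 * ice_vc_fdir_set_irq_ctx() expires.
 */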
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_rem_entry;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_clr_irq;
	}

exit:
	kfree(stat);
	return ret;

	/* A set_irq_ctx failure must still remove the entry inserted above,
	 * otherwise the IDR and rule list keep a pointer to freed memory;
	 * only a write_fltr failure also needs the IRQ context cleared.
	 */
err_clr_irq:
	ice_vc_fdir_clear_irq_ctx(vf);
err_rem_entry:
	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
/**
 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
	struct virtchnl_fdir_del *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);

	conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
		dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
			vf->vf_id, fltr->flow_id);
		goto err_exit;
	}

	/* Just return failure when ctrl_vsi idx is invalid */
	if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
		goto err_exit;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_del_tmr;
	}

	kfree(stat);

	return ret;

err_del_tmr:
	ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}

/**
 * ice_vf_fdir_init - init FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_init(struct ice_vf *vf)
{
	struct ice_vf_fdir *fdir = &vf->fdir;

	idr_init(&fdir->fdir_rule_idr);
	INIT_LIST_HEAD(&fdir->fdir_rule_list);

	spin_lock_init(&fdir->ctx_lock);
	fdir->ctx_irq.flags = 0;
	fdir->ctx_done.flags = 0;
}

/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}