// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.
#include <linux/skbuff.h>

#include "hnae3.h"
#include "hclge_comm_cmd.h"
#include "hclge_comm_rss.h"

/* Default RSS hash key programmed into hardware when the driver
 * initializes RSS (copied into rss_cfg->rss_hash_key below).
 */
static const u8 hclge_comm_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

/* Fill @rss_tuple_cfg with the default input-tuple selection for every
 * supported flow type.  TCP/UDP and fragment flows use the generic
 * "OTHER" tuple; SCTP uses the SCTP-specific tuple.  For IPv6 SCTP on
 * device versions <= V2 the NO_PORT variant is selected instead
 * (presumably older hardware cannot hash the IPv6 SCTP port — confirm
 * against the HW spec).
 */
static void
hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
			  struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
{
	rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
	rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
	rss_tuple_cfg->ipv6_sctp_en =
		ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
	rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
}

/* Initialize the software RSS configuration in @rss_cfg: default tuple
 * sets, hash algorithm, hash key and the indirection table (allocated
 * with devm, so it is freed automatically with the device).
 *
 * Returns 0 on success, -ENOMEM if the indirection table allocation
 * fails.  Note: no hardware command is issued here; only the shadow
 * state is prepared.
 */
int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
			    struct hnae3_ae_dev *ae_dev,
			    struct hclge_comm_rss_cfg *rss_cfg)
{
	u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
	int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
	u16 *rss_ind_tbl;

	/* For a VF the queue count comes from the knic info; the PF path
	 * is expected to have set rss_cfg->rss_size elsewhere.
	 */
	if (nic->flags & HNAE3_SUPPORT_VF)
		rss_cfg->rss_size = nic->kinfo.rss_size;

	/* V2+ devices default to the simple (XOR) algorithm instead of
	 * Toeplitz.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;

	hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);

	rss_cfg->rss_algo = rss_algo;

	rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
				   sizeof(*rss_ind_tbl), GFP_KERNEL);
	if (!rss_ind_tbl)
		return -ENOMEM;

	rss_cfg->rss_indirection_tbl = rss_ind_tbl;
	memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
	       HCLGE_COMM_RSS_KEY_SIZE);

	hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);

	return 0;
}

/* Compute per-TC RSS parameters for the hardware TC-mode command.
 *
 * @rss_size:  queues per TC; the hardware wants its log2, rounded up to
 *             a power of two first.
 * @hw_tc_map: bitmap of enabled TCs; disabled TCs get offset 0.
 * @tc_offset/@tc_valid/@tc_size: output arrays of HCLGE_COMM_MAX_TC_NUM
 *             entries, filled for every TC index.
 *
 * Note: tc_valid[i] is set to 1 unconditionally; only the offset
 * reflects hw_tc_map.
 */
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
				u16 *tc_valid, u16 *tc_size)
{
	u16 roundup_size;
	u32 i;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}
}

/* Program the per-TC RSS mode (valid bit, log2 size incl. MSB, queue
 * offset) into hardware via the RSS_TC_MODE command.
 *
 * Each TC is packed into one little-endian u16: valid bit, size field
 * plus a separate MSB bit (the size value is wider than the base
 * field), and the queue offset.
 *
 * Returns 0 on success or the (negative) firmware command status.
 */
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
			       u16 *tc_valid, u16 *tc_size)
{
	struct hclge_comm_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	unsigned int i;
	int ret;

	req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_RSS_TC_MODE,
					false);
	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
				HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
		/* tc_size may exceed the base field width; its top bit
		 * lives in a dedicated MSB bit position.
		 */
		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
			      tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
			      0x1);
		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
				HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to set rss tc mode, ret = %d.\n", ret);

	return ret;
}

/* Apply an ethtool hash-function/key request (.set_rxfh path).
 *
 * @hfunc is an ETH_RSS_HASH_* value; ETH_RSS_HASH_NO_CHANGE keeps the
 * currently configured algorithm.  If @key is NULL the existing shadow
 * key is re-programmed with the (possibly new) algorithm; otherwise the
 * new key is written to hardware and then cached in the shadow config.
 *
 * Returns 0 on success, -EINVAL for an unsupported hfunc, or the
 * firmware command status.  The shadow rss_algo is only updated after
 * the hardware accepted the key.
 */
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
				struct hclge_comm_hw *hw, const u8 *key,
				const u8 hfunc)
{
	u8 hash_algo;
	int ret;

	ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
	if (ret)
		return ret;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user-specified key */
		memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
	} else {
		ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
						  rss_cfg->rss_hash_key);
		if (ret)
			return ret;
	}
	rss_cfg->rss_algo = hash_algo;

	return 0;
}

/* Apply an ethtool RSS tuple request (.set_rxnfc path) to hardware.
 *
 * Only RXH_IP_SRC/RXH_IP_DST/RXH_L4_B_0_1/RXH_L4_B_2_3 bits are
 * accepted in nfc->data; anything else returns -EINVAL.  On success
 * the per-flow-type enables actually sent are cached back into the
 * shadow tuple config so later .get_rxnfc queries reflect hardware.
 */
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
			     struct hclge_comm_hw *hw,
			     struct hclge_comm_rss_cfg *rss_cfg,
			     struct ethtool_rxnfc *nfc)
{
	struct hclge_comm_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_RSS_INPUT_TUPLE,
					false);

	ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to init rss tuple cmd, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	/* Only update the shadow config once hardware accepted the cmd. */
	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

/* ethtool .get_rxfh_key_size: the RSS hash key size is fixed. */
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_COMM_RSS_KEY_SIZE;
}

/* Derive the skb packet-hash type advertised to the stack from the
 * currently enabled tuple sets: any L4 tuple enabled -> L4 hash, else
 * any fragment (L3) tuple -> L3 hash, else no hash.
 */
void hclge_comm_get_rss_type(struct hnae3_handle *nic,
			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
{
	if (rss_tuple_sets->ipv4_tcp_en ||
	    rss_tuple_sets->ipv4_udp_en ||
	    rss_tuple_sets->ipv4_sctp_en ||
	    rss_tuple_sets->ipv6_tcp_en ||
	    rss_tuple_sets->ipv6_udp_en ||
	    rss_tuple_sets->ipv6_sctp_en)
		nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (rss_tuple_sets->ipv4_fragment_en ||
		 rss_tuple_sets->ipv6_fragment_en)
		nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

/* Translate an ethtool ETH_RSS_HASH_* value into the hardware hash
 * algorithm id, stored through @hash_algo.  ETH_RSS_HASH_NO_CHANGE
 * yields the algorithm currently cached in @rss_cfg.
 *
 * Returns 0 on success or -EINVAL for an unsupported hfunc.
 */
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
			       const u8 hfunc, u8 *hash_algo)
{
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
		return 0;
	case ETH_RSS_HASH_XOR:
		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
		return 0;
	case ETH_RSS_HASH_NO_CHANGE:
		*hash_algo = rss_cfg->rss_algo;
		return 0;
	default:
		return -EINVAL;
	}
}

/* Fill the shadow indirection table with the default round-robin
 * mapping: entry i -> queue (i % rss_size).
 */
void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
				   struct hclge_comm_rss_cfg *rss_cfg)
{
	u16 i;
	/* Initialize RSS indirect table */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}

/* Look up the cached tuple-enable byte for an ethtool flow type
 * (.get_rxnfc helper).  IPV4_FLOW/IPV6_FLOW are not cached per-flow;
 * they always report src+dst IP hashing.
 *
 * Returns 0 on success or -EINVAL for an unsupported flow type.
 */
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
			     u8 *tuple_sets)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		*tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Pack the high bit(s) of queue id @qid for table slot @j into the
 * command's rss_qid_h array.  The low 8 bits of each qid go into
 * rss_qid_l[]; the bit above HCLGE_COMM_RSS_CFG_TBL_BW_L is packed
 * HCLGE_COMM_RSS_CFG_TBL_BW_H bits per entry into rss_qid_h[].
 */
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
			       u16 qid, u32 j)
{
	u8 rss_msb_oft;
	u8 rss_msb_val;

	rss_msb_oft =
		j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
	rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
		      (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
	req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
}

/* Write the RSS indirection table @indir (rss_ind_tbl_size entries of
 * queue ids) to hardware, HCLGE_COMM_RSS_CFG_TBL_SIZE entries per
 * command.  Assumes rss_ind_tbl_size is a multiple of the per-command
 * table size (the integer division below drops any remainder).
 *
 * Returns 0 on success or the first failing command's status.
 */
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
				   struct hclge_comm_hw *hw, const u16 *indir)
{
	struct hclge_comm_rss_ind_tbl_cmd *req;
	struct hclge_desc desc;
	u16 rss_cfg_tbl_num;
	int ret;
	u16 qid;
	u16 i;
	u32 j;

	req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
	rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGE_COMM_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		/* setup_basic_desc re-zeroes desc.data, so the |= in the
		 * MSB helper starts from a clean slate each iteration.
		 */
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_COMM_OPC_RSS_INDIR_TABLE,
						false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap =
			cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
			qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
			req->rss_qid_l[j] = qid & 0xff;
			hclge_comm_append_rss_msb_info(req, qid, j);
		}
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure rss table, ret = %d.\n",
				ret);
			return ret;
		}
	}
	return 0;
}

/* Program the shadow tuple configuration into hardware via the
 * RSS_INPUT_TUPLE command.  On the PF (@is_pf) the stack-visible
 * rss_type is refreshed from the same tuple sets.
 *
 * Returns 0 on success or the firmware command status.
 */
int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
				   struct hclge_comm_hw *hw, bool is_pf,
				   struct hclge_comm_rss_cfg *rss_cfg)
{
	struct hclge_comm_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_RSS_INPUT_TUPLE,
					false);

	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	if (is_pf)
		hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to configure rss input, ret = %d.\n", ret);
	return ret;
}

/* ethtool .get_rxfh helper: report the cached hash algorithm (mapped
 * back to ETH_RSS_HASH_*) and/or copy out the cached hash key.  Either
 * output pointer may be NULL to skip that part.
 */
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
				  u8 *hfunc)
{
	/* Get hash algorithm */
	if (hfunc) {
		switch (rss_cfg->rss_algo) {
		case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}

/* Copy the cached indirection table into the (u32) ethtool buffer.
 * A NULL @indir means the caller did not request the table.
 */
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
				  u32 *indir, u16 rss_ind_tbl_size)
{
	u16 i;

	if (!indir)
		return;

	for (i = 0; i < rss_ind_tbl_size; i++)
		indir[i] = rss_cfg->rss_indirection_tbl[i];
}

/* Program the hash algorithm @hfunc and HCLGE_COMM_RSS_KEY_SIZE bytes
 * of @key into hardware.  The key is sent in chunks of
 * HCLGE_COMM_RSS_HASH_KEY_NUM bytes; each command carries the chunk
 * offset alongside the algorithm bits in hash_config.
 *
 * Returns 0 on success or the first failing command's status.
 */
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
				const u8 *key)
{
	struct hclge_comm_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_COMM_RSS_KEY_SIZE;
	req = (struct hclge_comm_rss_config_cmd *)desc.data;

	while (key_counts) {
		/* setup_basic_desc zeroes desc.data, so |= below builds
		 * hash_config from zero every iteration.
		 */
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_COMM_OPC_RSS_GENERIC_CFG,
						false);

		req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
		       key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure RSS key, ret = %d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

/* Convert ethtool RXH_* bits in nfc->data into the hardware tuple-enable
 * bits.  SCTP flows additionally hash the verification tag.  The &= ~
 * branches are no-ops on a freshly built value but keep each bit's
 * set/clear explicit.
 */
static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_COMM_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_COMM_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_COMM_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_COMM_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_COMM_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_COMM_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_COMM_V_TAG_BIT;

	return hash_sets;
}

/* Build an RSS input-tuple command: start from the cached tuple sets,
 * then overwrite the entry for nfc->flow_type with the bits requested
 * in nfc->data.
 *
 * Returns -EINVAL for an unsupported flow type, or for IPv6 SCTP port
 * hashing on device versions <= V2 (matching the NO_PORT default set in
 * hclge_comm_init_rss_tuple); 0 otherwise.
 */
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
				  struct ethtool_rxnfc *nfc,
				  struct hnae3_ae_dev *ae_dev,
				  struct hclge_comm_rss_input_tuple_cmd *req)
{
	u8 tuple_sets;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		/* <= V2 devices cannot hash the IPv6 SCTP ports, so
		 * reject a request for L4 port bits on that hardware.
		 */
		if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Convert hardware tuple-enable bits back into ethtool RXH_* bits
 * (inverse of hclge_comm_get_rss_hash_bits; the V_TAG bit has no RXH
 * equivalent and is dropped).
 */
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
{
	u64 tuple_data = 0;

	if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_COMM_D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_COMM_S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}