// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory required for the function,
 * based on the number of resources it must provide context for.
 **/
static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
				     u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
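
/* Worked example (illustrative only): with the 512-byte alignment the two
 * helpers above enforce, a hypothetical running total of 0x3001 bytes has a
 * remainder of 1, so i40e_align_l2obj_base() pads it by 511 bytes and
 * returns 0x3200, while an already-aligned total such as 0x3200 is returned
 * unchanged.  i40e_calculate_l2fpm_size() applies this rounding after each
 * object type, so every object class starts on a 512-byte boundary.
 */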

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
		      u32 rxq_num, u32 fcoe_cntx_num,
		      u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	int ret_code = 0;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
					  sizeof(struct i40e_hmc_obj_info) *
					  I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
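
/* Usage sketch (illustrative, not part of this file): a PF init flow sizes
 * the HMC from its capabilities and then programs the backing store, along
 * the lines of what the main driver does at probe time:
 *
 *	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 *				hw->func_caps.num_rx_qp, 0, 0);
 *	if (!err)
 *		err = i40e_configure_lan_hmc(hw,
 *					     I40E_HMC_MODEL_DIRECT_PREFERRED);
 */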

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV
 *	   cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by pd after this function
 *	   returns.
 **/
static int i40e_remove_pd_page(struct i40e_hw *hw,
			       struct i40e_hmc_info *hmc_info,
			       u32 idx)
{
	int ret_code = 0;

	if (!i40e_prep_remove_pd_page(hmc_info, idx))
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
 *	   set to 0) and PMSDDATAHIGH to invalidate the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
static int i40e_remove_sd_bp(struct i40e_hw *hw,
			     struct i40e_hmc_info *hmc_info,
			     u32 idx)
{
	int ret_code = 0;

	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);

	return ret_code;
}
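
/* Both removal wrappers above follow the same prep/commit split: the
 * i40e_prep_remove_*() call only validates the index and reference counts,
 * while the *_new() call (invoked here with its PF flag set to true)
 * performs the invalidating register write and frees the backing memory.
 */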

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
				      struct i40e_hmc_lan_create_obj_info *info)
{
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	int ret_code = 0;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover cases where you may not want an SD with the full
	 * 2M of memory but something smaller. By not filling out any size,
	 * the function will default the SD size to 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								   info->hmc_info,
								   i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
int i40e_configure_lan_hmc(struct i40e_hw *hw,
			   enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	int ret_code = 0;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code)
			goto configure_lan_hmc_out;
		/* else clause falls through to the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown HMC model: %d\n",
		       model);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
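
/* The FPM base registers written above hold 512-byte units, which is why
 * each object base is divided by 512: e.g. (hypothetical numbers) an Rx
 * context base of 0x4000 bytes into the FPM would be programmed into
 * I40E_GLHMC_LANRXBASE as 0x20.
 */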

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate the memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				      struct i40e_hmc_lan_delete_obj_info *info)
{
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	int ret_code = 0;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}
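
/* Note the two-pass teardown above: the backing pages behind paged SDs are
 * released first, and only then are the SD entries themselves removed
 * (direct backing pages via i40e_remove_sd_bp(), PD pages via
 * i40e_remove_pd_page()), the reverse of the create path.
 */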

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	int ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	sizeof_field(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field            Width   LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),            13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),      1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),            57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),           1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),     1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),           1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),     1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),            8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),        13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),      1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),            13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),     1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),   1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),     1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),    64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),             32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),         10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),      1, 94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					  /* Field            Width   LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),         13,    0 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),         8,   13 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),         57,   32 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),         13,   89 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),         7,  102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),         5,  109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),         2,  114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),         1,  116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),      1,  117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),        1,  118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),        1,  119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),      4,  120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),      2,  124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),        1,  127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),        14,  174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena),  1,  193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena),  1,  194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),   1,  195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),   1,  196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),    3,  198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),       1,  201 },
	{ 0 }
};
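
/* Reading a table entry (illustrative): the Tx qlen element above,
 * {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128}, describes a
 * 13-bit field whose LSB is bit 161 of the context image, i.e. byte 20,
 * bit 1.  Because qlen is a u16 in struct i40e_hmc_obj_txq, sizeof_field()
 * yields 2 and i40e_set_hmc_context() below dispatches it to
 * i40e_write_word(), which shifts the value left by one and merges it at
 * byte offset 20.
 */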

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}
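
/* Worked example (illustrative): packing the Rx hsplit_0 field (width 4,
 * LSB 120, a u8 in struct i40e_hmc_obj_rxq) goes through i40e_write_byte()
 * above with shift_width = 120 % 8 = 0 and a destination offset of
 * 120 / 8 = 15; mask 0xf is applied before the read-modify-write, so the
 * upper four bits of context byte 15 are preserved.
 */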

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw: the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static int i40e_clear_hmc_context(struct i40e_hw *hw,
				  u8 *context_bytes,
				  enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);

	return 0;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct whose fields supply the values to pack
 **/
static int i40e_set_hmc_context(u8 *context_bytes,
				struct i40e_context_ele *ce_info,
				u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return 0;
}
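
/* The loop above relies on the zero-width sentinel ({ 0 }) that terminates
 * i40e_hmc_txq_ce_info and i40e_hmc_rxq_ce_info, so each described field
 * is packed exactly once using the element size recorded by
 * I40E_HMC_STORE().
 */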

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: the hardware struct, from which we obtain the i40e_hmc_info pointer
 * @object_base: pointer used to return the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address given its base
 * address within the FPM and its index.  It is used for LAN queue contexts.
 **/
static
int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
			   enum i40e_hmc_lan_rsrc_type rsrc_type,
			   u32 obj_idx)
{
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;
	int ret_code = 0;

	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
		       ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
				    u16 queue)
{
	u8 *context_bytes;
	int err;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context field values
 **/
int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
				  u16 queue,
				  struct i40e_hmc_obj_txq *s)
{
	u8 *context_bytes;
	int err;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}
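
/* Usage sketch (illustrative, patterned after the Tx ring setup in the
 * main driver; ring and pf_q stand in for the caller's Tx ring and
 * absolute queue number):
 *
 *	struct i40e_hmc_obj_txq tx_ctx;
 *	int err;
 *
 *	memset(&tx_ctx, 0, sizeof(tx_ctx));
 *	tx_ctx.new_context = 1;
 *	tx_ctx.base = (ring->dma / 128);
 *	tx_ctx.qlen = ring->count;
 *
 *	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
 *	if (!err)
 *		err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
 */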

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
				    u16 queue)
{
	u8 *context_bytes;
	int err;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context field values
 **/
int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
				  u16 queue,
				  struct i40e_hmc_obj_rxq *s)
{
	u8 *context_bytes;
	int err;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}
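
/* Usage sketch (illustrative, abbreviated; a real Rx setup also fills
 * buffer-sizing fields such as dbuff and hbuff): the Rx path mirrors the
 * Tx path above, clearing the context before writing it:
 *
 *	struct i40e_hmc_obj_rxq rx_ctx;
 *	int err;
 *
 *	memset(&rx_ctx, 0, sizeof(rx_ctx));
 *	rx_ctx.base = (ring->dma / 128);
 *	rx_ctx.qlen = ring->count;
 *
 *	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
 *	if (!err)
 *		err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
 */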