/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 sd_index,
				    enum i40e_sd_entry_type type,
				    u64 direct_mode_sz)
{
	enum i40e_memory_type mem_type __attribute__((unused));
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
			       sizeof(struct i40e_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem,
			       sizeof(struct i40e_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
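
/*
 * SD geometry, for orientation (editorial note, inferred from the
 * constants used above): a paged SD owns one 4KB PD page
 * (I40E_HMC_PAGED_BP_SIZE) holding 512 eight-byte page descriptors,
 * each of which can point at a 4KB backing page, i.e. 512 * 4KB = 2MB
 * of HMC address space per SD.  A direct SD covers the same span with
 * a single contiguous i40e_mem_bp_jumbo allocation of direct_mode_sz
 * bytes instead.
 */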

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 *
 * This function:
 * 1. Initializes the pd entry
 * 2. Adds pd_entry in the pd_table
 * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
 * 4. Initializes the pd_entry's ref count to 1
 * assumptions:
 * 1. The memory for the pd should be pinned down, physically contiguous,
 *    aligned on a 4K boundary, and zeroed.
 * 2. It should be 4K in size.
 **/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 pd_index)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		/* allocate a 4K backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
						 I40E_HMC_PAGED_BP_SIZE,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;

		memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = mem.pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		memcpy(pd_addr, &page_desc, sizeof(u64));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
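
/*
 * Index math above, worked through (illustrative): with
 * I40E_HMC_PD_CNT_IN_SD == 512, pd_index 1000 lands in segment
 * descriptor sd_idx = 1000 / 512 = 1 at slot rel_pd_idx = 1000 % 512
 * = 488, so its page descriptor lives at qword 488 of that SD's PD
 * page.  The descriptor written there is the backing page's physical
 * address with bit 0 set as the valid bit (page_desc = mem.pa | 0x1).
 */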

/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 * 1. Marks the entry in the pd table (for paged address mode) or in the
 *    sd table (for direct address mode) invalid.
 * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
 * 3. Decrements the ref count for the pd_entry
 * assumptions:
 * 1. Caller can deallocate the memory used by backing storage after this
 *    function returns.
 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info,
			      u32 idx, bool is_pf)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);

	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
				   u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
				  struct i40e_hmc_info *hmc_info,
				  u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;
	i40e_status ret_code = 0;

	/* get the entry and clear it if we are the PF */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (is_pf) {
		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
	} else {
		ret_code = I40E_NOT_SUPPORTED;
		goto exit;
	}
	ret_code = i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
exit:
	return ret_code;
}
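
/*
 * Teardown note (editorial): removal of a direct SD backing page is
 * split into two phases.  i40e_prep_remove_sd_bp() only drops the
 * backing page ref count and fails with I40E_ERR_NOT_READY while
 * other users remain; once it succeeds, i40e_remove_sd_bp_new()
 * clears the SD entry in hardware and frees the DMA memory.  Only the
 * PF may clear SD entries here; the VF path returns
 * I40E_NOT_SUPPORTED.  The same two-phase pattern applies to PD pages
 * in the two functions that follow.
 */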

/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
				     u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 idx, bool is_pf)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (is_pf) {
		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
	} else {
		ret_code = I40E_NOT_SUPPORTED;
		goto exit;
	}
	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
exit:
	return ret_code;
}
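
/*
 * Typical call order (a sketch, not taken from this file): to back a
 * paged HMC object a caller would do something like
 *
 *	ret = i40e_add_sd_table_entry(hw, hmc_info, sd_idx,
 *				      I40E_SD_TYPE_PAGED, 0);
 *	if (!ret)
 *		ret = i40e_add_pd_table_entry(hw, hmc_info, pd_idx);
 *
 * (direct_mode_sz is unused for paged SDs) and tear it down in
 * reverse: i40e_remove_pd_bp() per page, then
 * i40e_prep_remove_pd_page() followed by i40e_remove_pd_page_new()
 * once the PD table's ref count reaches zero.
 */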