/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 sd_index,
				    enum i40e_sd_entry_type type,
				    u64 direct_mode_sz)
{
	enum i40e_memory_type mem_type __attribute__((unused));
	struct i40e_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	i40e_status ret_code = I40E_SUCCESS;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			sd_entry->u.pd_table.pd_page_addr = mem;
		} else {
			sd_entry->u.bp.addr = mem;
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}

/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds the pd_entry to the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary and zeroed.
 *	2. It should be 4K in size.
 **/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 pd_index,
				    struct i40e_dma_mem *rsrc_pg)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		pd_entry->bp.addr = *page;
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		memcpy(pd_addr, &page_desc, sizeof(u64));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
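/*
 * Usage sketch (illustrative, not code from this file): a caller bringing up
 * a paged HMC object would add the segment descriptor first and then hook its
 * 4K backing pages in behind it. One SD covers I40E_HMC_PD_CNT_IN_SD page
 * descriptors; sd_idx, pd_count, i and ret_code are hypothetical caller
 * variables.
 *
 *	ret_code = i40e_add_sd_table_entry(hw, hmc_info, sd_idx,
 *					   I40E_SD_TYPE_PAGED,
 *					   I40E_HMC_DIRECT_BP_SIZE);
 *	for (i = 0; !ret_code && i < pd_count; i++)
 *		ret_code = i40e_add_pd_table_entry(hw, hmc_info,
 *				sd_idx * I40E_HMC_PD_CNT_IN_SD + i, NULL);
 */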
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info,
			      u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
				   u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
				  struct i40e_hmc_info *hmc_info,
				  u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}
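/*
 * Usage sketch (illustrative, not a call made from this file): tearing down a
 * direct-mode segment descriptor is a two-step operation. The prep call only
 * drops the ref count and marks the entry invalid; the PF then clears the
 * hardware SD entry and frees the backing DMA memory. sd_idx and ret_code are
 * hypothetical caller variables.
 *
 *	if (!i40e_prep_remove_sd_bp(hmc_info, sd_idx))
 *		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, sd_idx, true);
 */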
/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
				     u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
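/*
 * Usage sketch (illustrative, not code from this file): full teardown of a
 * paged segment descriptor is the add path in reverse. Each backing page
 * behind the SD is removed first; the PD page itself is released once its
 * ref count drops to zero. sd_idx, pd_count, i and ret_code are hypothetical
 * caller variables.
 *
 *	for (i = 0; i < pd_count; i++)
 *		i40e_remove_pd_bp(hw, hmc_info,
 *				  sd_idx * I40E_HMC_PD_CNT_IN_SD + i);
 *	if (!i40e_prep_remove_pd_page(hmc_info, sd_idx))
 *		ret_code = i40e_remove_pd_page_new(hw, hmc_info, sd_idx, true);
 */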