// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 sd_index,
				    enum i40e_sd_entry_type type,
				    u64 direct_mode_sz)
{
	enum i40e_memory_type mem_type __attribute__((unused));
	struct i40e_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40e_dma_mem mem;
	i40e_status ret_code = I40E_SUCCESS;
	u64 alloc_len;
	if (!hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40E_SD_TYPE_PAGED) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40E_SD_TYPE_PAGED) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) *
					I40E_HMC_PD_CNT_IN_SD);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			sd_entry->u.pd_table.pd_page_addr = mem;
		} else {
			sd_entry->u.bp.addr = mem;
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (sd_entry->entry_type == I40E_SD_TYPE_DIRECT)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (ret_code && dma_mem_alloc_done)
		i40e_free_dma_mem(hw, &mem);

	return ret_code;
}

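/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller adding a direct-mode SD entry passes the 2MB backing size; in the
 * paged case direct_mode_sz is ignored and a 4K PD page is allocated.
 */
static inline i40e_status
i40e_example_add_direct_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
			   u32 sd_index)
{
	/* hypothetical wrapper; I40E_HMC_DIRECT_BP_SIZE is the 2MB size */
	return i40e_add_sd_table_entry(hw, hmc_info, sd_index,
				       I40E_SD_TYPE_DIRECT,
				       I40E_HMC_DIRECT_BP_SIZE);
}
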
/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 **/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 pd_index,
				    struct i40e_dma_mem *rsrc_pg)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    I40E_SD_TYPE_PAGED)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = false;
		}

		pd_entry->bp.addr = *page;
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		memcpy(pd_addr, &page_desc, sizeof(u64));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}

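/*
 * Usage sketch (illustrative only, not part of the driver): passing a NULL
 * rsrc_pg lets the function allocate its own 4K backing page; a hypothetical
 * caller holding a preallocated page would pass that instead.
 */
static inline i40e_status
i40e_example_add_pd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
		    u32 pd_index)
{
	/* hypothetical wrapper; NULL means "allocate the page internally" */
	return i40e_add_pd_table_entry(hw, hmc_info, pd_index, NULL);
}
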
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info,
			      u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40E_SD_TYPE_PAGED) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}

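/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller tearing down a range of page descriptors calls this once per index;
 * the backing page is only freed when its ref count drops to zero.
 */
static inline i40e_status
i40e_example_remove_pd_range(struct i40e_hw *hw,
			     struct i40e_hmc_info *hmc_info,
			     u32 first_idx, u32 cnt)
{
	i40e_status ret_code = 0;
	u32 i;

	/* hypothetical loop bounds; stop on the first failure */
	for (i = first_idx; i < first_idx + cnt; i++) {
		ret_code = i40e_remove_pd_bp(hw, hmc_info, i);
		if (ret_code)
			break;
	}

	return ret_code;
}
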
/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
				   u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = false;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
				  struct i40e_hmc_info *hmc_info,
				  u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* clear the HW SD entry and free its backing page; the prep call
	 * has already dropped the ref count
	 */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}

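/*
 * Usage sketch (illustrative only, not part of the driver): removal is a
 * two-step protocol. The prep call drops the ref count and reports
 * I40E_ERR_NOT_READY while other users remain; only then is the SD entry
 * cleared and its DMA memory freed. This hypothetical PF-side helper
 * mirrors that pairing.
 */
static inline i40e_status
i40e_example_remove_sd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
			  u32 idx)
{
	i40e_status ret_code;

	ret_code = i40e_prep_remove_sd_bp(hmc_info, idx);
	if (ret_code)
		return ret_code;	/* backing page still referenced */

	return i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
}
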
/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
				     u32 idx)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = false;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from a sd entry
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
				    struct i40e_hmc_info *hmc_info,
				    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
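
/*
 * Usage sketch (illustrative only, not part of the driver): removing a PD
 * page follows the same two-step pattern as the backing-page case above,
 * shown here as a hypothetical PF-side helper.
 */
static inline i40e_status
i40e_example_remove_pd_page(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
			    u32 idx)
{
	i40e_status ret_code;

	ret_code = i40e_prep_remove_pd_page(hmc_info, idx);
	if (ret_code)
		return ret_code;	/* pd_table still has active entries */

	return i40e_remove_pd_page_new(hw, hmc_info, idx, true);
}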