// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - rounds the base object offset up to a 512-byte
 * boundary
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
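
/* Worked example (values chosen for illustration): with
 * I40E_HMC_L2OBJ_BASE_ALIGNMENT == 512, an offset of 672 has a remainder
 * of 160, so the function returns 672 + (512 - 160) = 1024; an offset
 * that is already a multiple of 512 is returned unchanged.
 */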

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory required for the function,
 * based on the number of resources it must provide context for.
 **/
static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
				     u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
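
/* Layout note: each object type's region above begins on a 512-byte
 * boundary, so up to 511 bytes of padding may separate the Tx, Rx,
 * FCoE context and FCoE filter regions; the returned total is the
 * function-private memory (FPM) footprint used below to size the
 * segment descriptor table.
 */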

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	i40e_status ret_code = 0;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
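		/* Round-up division: e.g. (illustrative) an l2fpm_size of
		 * 3 MB with 2 MB direct backing pages
		 * (I40E_HMC_DIRECT_BP_SIZE) yields sd_cnt = 2.
		 */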
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
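
/* Typical call sequence (sketch; the hw->func_caps field names come from
 * the wider driver and are assumptions here, not defined in this file):
 *
 *	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 *				hw->func_caps.num_rx_qp, 0, 0);
 *	if (!ret)
 *		ret = i40e_configure_lan_hmc(hw,
 *					     I40E_HMC_MODEL_DIRECT_PREFERRED);
 *	...
 *	i40e_shutdown_lan_hmc(hw);	(on teardown)
 */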

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * Assumptions:
 *	1. Caller can deallocate the memory used by pd after this function
 *	   returns.
 **/
static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	i40e_status ret_code = 0;

	if (!i40e_prep_remove_pd_page(hmc_info, idx))
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid
 *	2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
 *	   set to 0) and PMSDDATAHIGH to invalidate the sd page
 *	3. Decrements the ref count for the sd_entry
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	i40e_status ret_code = 0;

	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	i40e_status ret_code = 0;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code)
			goto configure_lan_hmc_out;
		/* else fall through to the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  model);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

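	/* Note: the *BASE registers below take each object's FPM base address
	 * in 512-byte units, hence the divide by 512, matching the 512-byte
	 * alignment applied by i40e_align_l2obj_base().
	 */
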
	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	i40e_status ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)
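
/* I40E_HMC_STORE(s, e) expands to the first two initializers of a
 * struct i40e_context_ele entry below: the byte offset of member e
 * within struct s and that member's size in bytes.  The remaining two
 * initializers, width and lsb, give the field's bit position in the
 * hardware context image.
 */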

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
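
/* Reading one row above (illustrative): the qlen entry stores the
 * 13-bit queue-length field at bit 33 of context line 1, i.e. absolute
 * LSB 33 + 128 = 161 of the Tx queue context image, sourced from the
 * qlen member of struct i40e_hmc_obj_txq.
 */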

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}
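
/* Worked example for the byte path (illustrative): a 1-bit field at
 * lsb 30 gives shift_width = 30 % 8 = 6 and a target of
 * hmc_bits[30 / 8] = hmc_bits[3]; mask = 0x01 << 6 = 0x40, so only
 * bit 6 of that byte is replaced and the other seven bits are
 * preserved.
 */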

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits, so the shift would do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits, so the shift would do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}
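
/* Design note on the write helpers above: the value is masked and
 * shifted in host byte order and only converted via cpu_to_le16/32/64
 * when merged into the destination, so the context image is stored
 * little-endian regardless of host endianness; the memcpy() calls keep
 * the accesses safe on architectures that fault on unaligned loads and
 * stores.
 */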

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw:       the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);

	return 0;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info:  a description of the struct to be read from
 * @dest:     the struct whose fields are copied into the context
 **/
static i40e_status i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return 0;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the HW structure, which holds the i40e_hmc_info
 * @object_base: pointer to receive the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	u32 obj_offset_in_sd, obj_offset_in_pd;
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
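
/* Worked example of the offset math above (illustrative sizes): with a
 * 128-byte Tx queue context and an FPM base of 0, queue 33 sits at
 * obj_offset_in_fpm = 128 * 33 = 4224.  Under a paged SD with 4 KB
 * backing pages that is byte 4224 % 4096 = 128 of the second page;
 * under a direct SD it is simply byte 4224 of the 2 MB backing page.
 */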

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct containing the context field values
 **/
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}
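
/* Usage sketch (illustrative; the ring variables and the 128-byte base
 * unit are assumptions from the wider driver, not defined in this file):
 *
 *	struct i40e_hmc_obj_txq tx_ctx = {};
 *
 *	tx_ctx.base = ring_dma_addr / 128;
 *	tx_ctx.qlen = ring_count;
 *	err = i40e_clear_lan_tx_queue_context(hw, pf_queue);
 *	if (!err)
 *		err = i40e_set_lan_tx_queue_context(hw, pf_queue, &tx_ctx);
 */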

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct containing the context field values
 **/
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}