1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "ena_com.h"
34 
35 /*****************************************************************************/
36 /*****************************************************************************/
37 
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
40 
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
43 
45 #define ENA_CTRL_MAJOR		0
46 #define ENA_CTRL_MINOR		0
47 #define ENA_CTRL_SUB_MINOR	1
48 
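/* The minimal controller version the driver supports, packed in the same
 * bit layout as the controller version register so it can be compared
 * directly in ena_com_validate_version().
 */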
49 #define MIN_ENA_CTRL_VER \
50 	(((ENA_CTRL_MAJOR) << \
51 	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
52 	((ENA_CTRL_MINOR) << \
53 	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
54 	(ENA_CTRL_SUB_MINOR))
55 
56 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
57 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
58 
59 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
60 
61 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
62 
63 #define ENA_REGS_ADMIN_INTR_MASK 1
64 
65 #define ENA_POLL_MS	5
66 
67 /*****************************************************************************/
68 /*****************************************************************************/
69 /*****************************************************************************/
70 
71 enum ena_cmd_status {
72 	ENA_CMD_SUBMITTED,
73 	ENA_CMD_COMPLETED,
74 	/* Abort - canceled by the driver */
75 	ENA_CMD_ABORTED,
76 };
77 
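/* Context of a single in-flight admin command. One entry per admin queue
 * slot; the submitter waits on wait_event until the completion handler
 * copies the device response into user_cqe and updates status.
 */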
78 struct ena_comp_ctx {
79 	struct completion wait_event;
80 	struct ena_admin_acq_entry *user_cqe;
81 	u32 comp_size;
82 	enum ena_cmd_status status;
83 	/* status from the device */
84 	u8 comp_status;
85 	u8 cmd_opcode;
86 	bool occupied;
87 };
88 
89 struct ena_com_stats_ctx {
90 	struct ena_admin_aq_get_stats_cmd get_cmd;
91 	struct ena_admin_acq_get_stats_resp get_resp;
92 };
93 
94 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
95 				       struct ena_common_mem_addr *ena_addr,
96 				       dma_addr_t addr)
97 {
98 	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
99 		pr_err("dma address has more bits that the device supports\n");
100 		return -EINVAL;
101 	}
102 
103 	ena_addr->mem_addr_low = lower_32_bits(addr);
104 	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
105 
106 	return 0;
107 }
108 
109 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
110 {
111 	struct ena_com_admin_sq *sq = &queue->sq;
112 	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113 
114 	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 					 GFP_KERNEL);
116 
117 	if (!sq->entries) {
118 		pr_err("memory allocation failed\n");
119 		return -ENOMEM;
120 	}
121 
122 	sq->head = 0;
123 	sq->tail = 0;
124 	sq->phase = 1;
125 
126 	sq->db_addr = NULL;
127 
128 	return 0;
129 }
130 
131 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
132 {
133 	struct ena_com_admin_cq *cq = &queue->cq;
134 	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135 
136 	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 					 GFP_KERNEL);
138 
139 	if (!cq->entries) {
140 		pr_err("memory allocation failed\n");
141 		return -ENOMEM;
142 	}
143 
144 	cq->head = 0;
145 	cq->phase = 1;
146 
147 	return 0;
148 }
149 
150 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
151 				   struct ena_aenq_handlers *aenq_handlers)
152 {
153 	struct ena_com_aenq *aenq = &dev->aenq;
154 	u32 addr_low, addr_high, aenq_caps;
155 	u16 size;
156 
157 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 					   GFP_KERNEL);
161 
162 	if (!aenq->entries) {
163 		pr_err("memory allocation failed\n");
164 		return -ENOMEM;
165 	}
166 
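	/* head starts at q_depth so the masked index is 0;
	 * ena_com_admin_aenq_enable() later writes this value to the head
	 * doorbell to mark all entries as available to the device.
	 */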
167 	aenq->head = aenq->q_depth;
168 	aenq->phase = 1;
169 
170 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
171 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
172 
173 	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
174 	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
175 
176 	aenq_caps = 0;
177 	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
178 	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
179 		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
180 		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
181 	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
182 
183 	if (unlikely(!aenq_handlers)) {
184 		pr_err("aenq handlers pointer is NULL\n");
185 		return -EINVAL;
186 	}
187 
188 	aenq->aenq_handlers = aenq_handlers;
189 
190 	return 0;
191 }
192 
193 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
194 				     struct ena_comp_ctx *comp_ctx)
195 {
196 	comp_ctx->occupied = false;
197 	atomic_dec(&queue->outstanding_cmds);
198 }
199 
200 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
201 					  u16 command_id, bool capture)
202 {
203 	if (unlikely(!queue->comp_ctx)) {
204 		pr_err("Completion context is NULL\n");
205 		return NULL;
206 	}
207 
208 	if (unlikely(command_id >= queue->q_depth)) {
209 		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
210 		       command_id, queue->q_depth);
211 		return NULL;
212 	}
213 
214 	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
215 		pr_err("Completion context is occupied\n");
216 		return NULL;
217 	}
218 
219 	if (capture) {
220 		atomic_inc(&queue->outstanding_cmds);
221 		queue->comp_ctx[command_id].occupied = true;
222 	}
223 
224 	return &queue->comp_ctx[command_id];
225 }
226 
227 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
228 						       struct ena_admin_aq_entry *cmd,
229 						       size_t cmd_size_in_bytes,
230 						       struct ena_admin_acq_entry *comp,
231 						       size_t comp_size_in_bytes)
232 {
233 	struct ena_comp_ctx *comp_ctx;
234 	u16 tail_masked, cmd_id;
235 	u16 queue_size_mask;
236 	u16 cnt;
237 
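	/* The admin queue depth is a power of 2, so depth - 1 serves as the
	 * ring index mask.
	 */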
238 	queue_size_mask = admin_queue->q_depth - 1;
239 
240 	tail_masked = admin_queue->sq.tail & queue_size_mask;
241 
242 	/* In case of queue FULL */
243 	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
244 	if (cnt >= admin_queue->q_depth) {
245 		pr_debug("admin queue is full.\n");
246 		admin_queue->stats.out_of_space++;
247 		return ERR_PTR(-ENOSPC);
248 	}
249 
250 	cmd_id = admin_queue->curr_cmd_id;
251 
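	/* Stamp the descriptor with the SQ's current phase bit; the phase
	 * flips each time the tail wraps, which lets the device distinguish
	 * new entries from stale ones.
	 */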
252 	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
253 		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
254 
255 	cmd->aq_common_descriptor.command_id |= cmd_id &
256 		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
257 
258 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
259 	if (unlikely(!comp_ctx))
260 		return ERR_PTR(-EINVAL);
261 
262 	comp_ctx->status = ENA_CMD_SUBMITTED;
263 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
264 	comp_ctx->user_cqe = comp;
265 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
266 
267 	reinit_completion(&comp_ctx->wait_event);
268 
269 	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
270 
271 	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
272 		queue_size_mask;
273 
274 	admin_queue->sq.tail++;
275 	admin_queue->stats.submitted_cmd++;
276 
277 	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
278 		admin_queue->sq.phase = !admin_queue->sq.phase;
279 
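	/* Notify the device of the new submission by writing the updated
	 * tail to the admin queue doorbell.
	 */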
280 	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
281 
282 	return comp_ctx;
283 }
284 
285 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
286 {
287 	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
288 	struct ena_comp_ctx *comp_ctx;
289 	u16 i;
290 
291 	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
292 	if (unlikely(!queue->comp_ctx)) {
293 		pr_err("memory allocation failed\n");
294 		return -ENOMEM;
295 	}
296 
297 	for (i = 0; i < queue->q_depth; i++) {
298 		comp_ctx = get_comp_ctxt(queue, i, false);
299 		if (comp_ctx)
300 			init_completion(&comp_ctx->wait_event);
301 	}
302 
303 	return 0;
304 }
305 
306 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
307 						     struct ena_admin_aq_entry *cmd,
308 						     size_t cmd_size_in_bytes,
309 						     struct ena_admin_acq_entry *comp,
310 						     size_t comp_size_in_bytes)
311 {
312 	unsigned long flags = 0;
313 	struct ena_comp_ctx *comp_ctx;
314 
315 	spin_lock_irqsave(&admin_queue->q_lock, flags);
316 	if (unlikely(!admin_queue->running_state)) {
317 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
318 		return ERR_PTR(-ENODEV);
319 	}
320 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
321 					      cmd_size_in_bytes,
322 					      comp,
323 					      comp_size_in_bytes);
324 	if (IS_ERR(comp_ctx))
325 		admin_queue->running_state = false;
326 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
327 
328 	return comp_ctx;
329 }
330 
331 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
332 			      struct ena_com_create_io_ctx *ctx,
333 			      struct ena_com_io_sq *io_sq)
334 {
335 	size_t size;
336 	int dev_node = 0;
337 
338 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
339 
340 	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
341 	io_sq->desc_entry_size =
342 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
343 		sizeof(struct ena_eth_io_tx_desc) :
344 		sizeof(struct ena_eth_io_rx_desc);
345 
346 	size = io_sq->desc_entry_size * io_sq->q_depth;
347 
348 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
349 		dev_node = dev_to_node(ena_dev->dmadev);
350 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
351 		io_sq->desc_addr.virt_addr =
352 			dma_alloc_coherent(ena_dev->dmadev, size,
353 					   &io_sq->desc_addr.phys_addr,
354 					   GFP_KERNEL);
355 		set_dev_node(ena_dev->dmadev, dev_node);
356 		if (!io_sq->desc_addr.virt_addr) {
357 			io_sq->desc_addr.virt_addr =
358 				dma_alloc_coherent(ena_dev->dmadev, size,
359 						   &io_sq->desc_addr.phys_addr,
360 						   GFP_KERNEL);
361 		}
362 
363 		if (!io_sq->desc_addr.virt_addr) {
364 			pr_err("memory allocation failed\n");
365 			return -ENOMEM;
366 		}
367 	}
368 
369 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
370 		/* Allocate bounce buffers */
371 		io_sq->bounce_buf_ctrl.buffer_size =
372 			ena_dev->llq_info.desc_list_entry_size;
373 		io_sq->bounce_buf_ctrl.buffers_num =
374 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
375 		io_sq->bounce_buf_ctrl.next_to_use = 0;
376 
377 		size = io_sq->bounce_buf_ctrl.buffer_size *
378 			 io_sq->bounce_buf_ctrl.buffers_num;
379 
380 		dev_node = dev_to_node(ena_dev->dmadev);
381 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
382 		io_sq->bounce_buf_ctrl.base_buffer =
383 			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
384 		set_dev_node(ena_dev->dmadev, dev_node);
385 		if (!io_sq->bounce_buf_ctrl.base_buffer)
386 			io_sq->bounce_buf_ctrl.base_buffer =
387 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
388 
389 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
390 			pr_err("bounce buffer memory allocation failed\n");
391 			return -ENOMEM;
392 		}
393 
394 		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
395 		       sizeof(io_sq->llq_info));
396 
		/* Initialize the first bounce buffer */
398 		io_sq->llq_buf_ctrl.curr_bounce_buf =
399 			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
400 		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
401 		       0x0, io_sq->llq_info.desc_list_entry_size);
402 		io_sq->llq_buf_ctrl.descs_left_in_line =
403 			io_sq->llq_info.descs_num_before_header;
404 
405 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
406 			io_sq->entries_in_tx_burst_left =
407 				io_sq->llq_info.max_entries_in_tx_burst;
408 	}
409 
410 	io_sq->tail = 0;
411 	io_sq->next_to_comp = 0;
412 	io_sq->phase = 1;
413 
414 	return 0;
415 }
416 
417 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
418 			      struct ena_com_create_io_ctx *ctx,
419 			      struct ena_com_io_cq *io_cq)
420 {
421 	size_t size;
422 	int prev_node = 0;
423 
424 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
425 
426 	/* Use the basic completion descriptor for Rx */
427 	io_cq->cdesc_entry_size_in_bytes =
428 		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
429 		sizeof(struct ena_eth_io_tx_cdesc) :
430 		sizeof(struct ena_eth_io_rx_cdesc_base);
431 
432 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
433 
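	/* Same NUMA-aware scheme as the SQ descriptors: try to allocate on
	 * the requested node first, then retry with the default node.
	 */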
434 	prev_node = dev_to_node(ena_dev->dmadev);
435 	set_dev_node(ena_dev->dmadev, ctx->numa_node);
436 	io_cq->cdesc_addr.virt_addr =
437 		dma_alloc_coherent(ena_dev->dmadev, size,
438 				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
439 	set_dev_node(ena_dev->dmadev, prev_node);
440 	if (!io_cq->cdesc_addr.virt_addr) {
441 		io_cq->cdesc_addr.virt_addr =
442 			dma_alloc_coherent(ena_dev->dmadev, size,
443 					   &io_cq->cdesc_addr.phys_addr,
444 					   GFP_KERNEL);
445 	}
446 
447 	if (!io_cq->cdesc_addr.virt_addr) {
448 		pr_err("memory allocation failed\n");
449 		return -ENOMEM;
450 	}
451 
452 	io_cq->phase = 1;
453 	io_cq->head = 0;
454 
455 	return 0;
456 }
457 
458 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
459 						   struct ena_admin_acq_entry *cqe)
460 {
461 	struct ena_comp_ctx *comp_ctx;
462 	u16 cmd_id;
463 
464 	cmd_id = cqe->acq_common_descriptor.command &
465 		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
466 
467 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
468 	if (unlikely(!comp_ctx)) {
469 		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
470 		admin_queue->running_state = false;
471 		return;
472 	}
473 
474 	comp_ctx->status = ENA_CMD_COMPLETED;
475 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
476 
477 	if (comp_ctx->user_cqe)
478 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
479 
480 	if (!admin_queue->polling)
481 		complete(&comp_ctx->wait_event);
482 }
483 
484 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
485 {
486 	struct ena_admin_acq_entry *cqe = NULL;
487 	u16 comp_num = 0;
488 	u16 head_masked;
489 	u8 phase;
490 
491 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
492 	phase = admin_queue->cq.phase;
493 
494 	cqe = &admin_queue->cq.entries[head_masked];
495 
496 	/* Go over all the completions */
497 	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
498 		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit is validated.
		 */
502 		dma_rmb();
503 		ena_com_handle_single_admin_completion(admin_queue, cqe);
504 
505 		head_masked++;
506 		comp_num++;
507 		if (unlikely(head_masked == admin_queue->q_depth)) {
508 			head_masked = 0;
509 			phase = !phase;
510 		}
511 
512 		cqe = &admin_queue->cq.entries[head_masked];
513 	}
514 
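	/* Each completion releases the matching submission entry, so the SQ
	 * head advances together with the CQ head.
	 */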
515 	admin_queue->cq.head += comp_num;
516 	admin_queue->cq.phase = phase;
517 	admin_queue->sq.head += comp_num;
518 	admin_queue->stats.completed_cmd += comp_num;
519 }
520 
521 static int ena_com_comp_status_to_errno(u8 comp_status)
522 {
523 	if (unlikely(comp_status != 0))
524 		pr_err("admin command failed[%u]\n", comp_status);
525 
526 	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
527 		return -EINVAL;
528 
529 	switch (comp_status) {
530 	case ENA_ADMIN_SUCCESS:
531 		return 0;
532 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
533 		return -ENOMEM;
534 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
535 		return -EOPNOTSUPP;
536 	case ENA_ADMIN_BAD_OPCODE:
537 	case ENA_ADMIN_MALFORMED_REQUEST:
538 	case ENA_ADMIN_ILLEGAL_PARAMETER:
539 	case ENA_ADMIN_UNKNOWN_ERROR:
540 		return -EINVAL;
541 	}
542 
543 	return 0;
544 }
545 
546 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
547 						     struct ena_com_admin_queue *admin_queue)
548 {
549 	unsigned long flags = 0;
550 	unsigned long timeout;
551 	int ret;
552 
553 	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
554 
555 	while (1) {
556 		spin_lock_irqsave(&admin_queue->q_lock, flags);
557 		ena_com_handle_admin_completion(admin_queue);
558 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
559 
560 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
561 			break;
562 
563 		if (time_is_before_jiffies(timeout)) {
564 			pr_err("Wait for completion (polling) timeout\n");
565 			/* ENA didn't have any completion */
566 			spin_lock_irqsave(&admin_queue->q_lock, flags);
567 			admin_queue->stats.no_completion++;
568 			admin_queue->running_state = false;
569 			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
570 
571 			ret = -ETIME;
572 			goto err;
573 		}
574 
575 		msleep(ENA_POLL_MS);
576 	}
577 
578 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
579 		pr_err("Command was aborted\n");
580 		spin_lock_irqsave(&admin_queue->q_lock, flags);
581 		admin_queue->stats.aborted_cmd++;
582 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
583 		ret = -ENODEV;
584 		goto err;
585 	}
586 
587 	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
588 	     comp_ctx->status);
589 
590 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
591 err:
592 	comp_ctxt_release(admin_queue, comp_ctx);
593 	return ret;
594 }
595 
596 /**
597  * Set the LLQ configurations of the firmware
598  *
599  * The driver provides only the enabled feature values to the device,
600  * which in turn, checks if they are supported.
601  */
602 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
603 {
604 	struct ena_com_admin_queue *admin_queue;
605 	struct ena_admin_set_feat_cmd cmd;
606 	struct ena_admin_set_feat_resp resp;
607 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
608 	int ret;
609 
610 	memset(&cmd, 0x0, sizeof(cmd));
611 	admin_queue = &ena_dev->admin_queue;
612 
613 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
614 	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
615 
616 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
617 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
618 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
619 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
620 
621 	ret = ena_com_execute_admin_command(admin_queue,
622 					    (struct ena_admin_aq_entry *)&cmd,
623 					    sizeof(cmd),
624 					    (struct ena_admin_acq_entry *)&resp,
625 					    sizeof(resp));
626 
627 	if (unlikely(ret))
628 		pr_err("Failed to set LLQ configurations: %d\n", ret);
629 
630 	return ret;
631 }
632 
633 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
634 				   struct ena_admin_feature_llq_desc *llq_features,
635 				   struct ena_llq_configurations *llq_default_cfg)
636 {
637 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
638 	u16 supported_feat;
639 	int rc;
640 
641 	memset(llq_info, 0, sizeof(*llq_info));
642 
643 	supported_feat = llq_features->header_location_ctrl_supported;
644 
645 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
646 		llq_info->header_location_ctrl =
647 			llq_default_cfg->llq_header_location;
648 	} else {
649 		pr_err("Invalid header location control, supported: 0x%x\n",
650 		       supported_feat);
651 		return -EINVAL;
652 	}
653 
654 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
655 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
656 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
657 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
658 		} else	{
659 			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
660 				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
661 			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
662 				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
663 			} else {
664 				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
665 				       supported_feat);
666 				return -EINVAL;
667 			}
668 
669 			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
670 			       llq_default_cfg->llq_stride_ctrl, supported_feat,
671 			       llq_info->desc_stride_ctrl);
672 		}
673 	} else {
674 		llq_info->desc_stride_ctrl = 0;
675 	}
676 
677 	supported_feat = llq_features->entry_size_ctrl_supported;
678 	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
679 		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
680 		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
681 	} else {
682 		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
683 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
684 			llq_info->desc_list_entry_size = 128;
685 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
686 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
687 			llq_info->desc_list_entry_size = 192;
688 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
689 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
690 			llq_info->desc_list_entry_size = 256;
691 		} else {
692 			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
693 			       supported_feat);
694 			return -EINVAL;
695 		}
696 
697 		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
698 		       llq_default_cfg->llq_ring_entry_size, supported_feat,
699 		       llq_info->desc_list_entry_size);
700 	}
701 	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
703 		 * This requirement comes from __iowrite64_copy()
704 		 */
705 		pr_err("illegal entry size %d\n",
706 		       llq_info->desc_list_entry_size);
707 		return -EINVAL;
708 	}
709 
710 	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
711 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
712 			sizeof(struct ena_eth_io_tx_desc);
713 	else
714 		llq_info->descs_per_entry = 1;
715 
716 	supported_feat = llq_features->desc_num_before_header_supported;
717 	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
718 		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
719 	} else {
720 		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
721 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
722 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
723 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
724 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
725 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
726 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
727 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
728 		} else {
729 			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
730 			       supported_feat);
731 			return -EINVAL;
732 		}
733 
734 		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
735 		       llq_default_cfg->llq_num_decs_before_header,
736 		       supported_feat, llq_info->descs_num_before_header);
737 	}
738 
739 	llq_info->max_entries_in_tx_burst =
740 		(u16)(llq_features->max_tx_burst_size /	llq_default_cfg->llq_ring_entry_size_value);
741 
742 	rc = ena_com_set_llq(ena_dev);
743 	if (rc)
744 		pr_err("Cannot set LLQ configuration: %d\n", rc);
745 
746 	return rc;
747 }
748 
749 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
750 							struct ena_com_admin_queue *admin_queue)
751 {
752 	unsigned long flags = 0;
753 	int ret;
754 
755 	wait_for_completion_timeout(&comp_ctx->wait_event,
756 				    usecs_to_jiffies(
757 					    admin_queue->completion_timeout));
758 
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion, but the driver didn't receive the MSI-X
	 *    interrupt.
	 */
764 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
765 		spin_lock_irqsave(&admin_queue->q_lock, flags);
766 		ena_com_handle_admin_completion(admin_queue);
767 		admin_queue->stats.no_completion++;
768 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
769 
770 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
771 			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
772 			       comp_ctx->cmd_opcode,
773 			       admin_queue->auto_polling ? "ON" : "OFF");
774 			/* Check if fallback to polling is enabled */
775 			if (admin_queue->auto_polling)
776 				admin_queue->polling = true;
777 		} else {
778 			pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
779 			       comp_ctx->cmd_opcode, comp_ctx->status);
780 		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an
		 * interrupt and autopolling mode is enabled.
		 * Continue normal execution in such a case.
		 */
785 		if (!admin_queue->polling) {
786 			admin_queue->running_state = false;
787 			ret = -ETIME;
788 			goto err;
789 		}
790 	}
791 
792 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
793 err:
794 	comp_ctxt_release(admin_queue, comp_ctx);
795 	return ret;
796 }
797 
/* This method reads the hardware device register by posting a write to the
 * MMIO_REG_READ register and waiting for the device to respond.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
802 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
803 {
804 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
805 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
806 		mmio_read->read_resp;
807 	u32 mmio_read_reg, ret, i;
808 	unsigned long flags = 0;
809 	u32 timeout = mmio_read->reg_read_to;
810 
811 	might_sleep();
812 
813 	if (timeout == 0)
814 		timeout = ENA_REG_READ_TIMEOUT;
815 
816 	/* If readless is disabled, perform regular read */
817 	if (!mmio_read->readless_supported)
818 		return readl(ena_dev->reg_bar + offset);
819 
820 	spin_lock_irqsave(&mmio_read->lock, flags);
821 	mmio_read->seq_num++;
822 
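	/* Offset the expected req_id by 0xDEAD so the stale value in the
	 * response buffer can't match the new seq_num before the device
	 * writes its response back.
	 */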
823 	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
824 	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
825 			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
826 	mmio_read_reg |= mmio_read->seq_num &
827 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
828 
829 	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
830 
831 	for (i = 0; i < timeout; i++) {
832 		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
833 			break;
834 
835 		udelay(1);
836 	}
837 
838 	if (unlikely(i == timeout)) {
839 		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
840 		       mmio_read->seq_num, offset, read_resp->req_id,
841 		       read_resp->reg_off);
842 		ret = ENA_MMIO_READ_TIMEOUT;
843 		goto err;
844 	}
845 
846 	if (read_resp->reg_off != offset) {
847 		pr_err("Read failure: wrong offset provided\n");
848 		ret = ENA_MMIO_READ_TIMEOUT;
849 	} else {
850 		ret = read_resp->reg_val;
851 	}
852 err:
853 	spin_unlock_irqrestore(&mmio_read->lock, flags);
854 
855 	return ret;
856 }
857 
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on the wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
865 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
866 					     struct ena_com_admin_queue *admin_queue)
867 {
868 	if (admin_queue->polling)
869 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
870 								 admin_queue);
871 
872 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
873 							    admin_queue);
874 }
875 
876 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
877 				 struct ena_com_io_sq *io_sq)
878 {
879 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
880 	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
881 	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
882 	u8 direction;
883 	int ret;
884 
885 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
886 
887 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
888 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
889 	else
890 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
891 
892 	destroy_cmd.sq.sq_identity |= (direction <<
893 		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
894 		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
895 
896 	destroy_cmd.sq.sq_idx = io_sq->idx;
897 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
898 
899 	ret = ena_com_execute_admin_command(admin_queue,
900 					    (struct ena_admin_aq_entry *)&destroy_cmd,
901 					    sizeof(destroy_cmd),
902 					    (struct ena_admin_acq_entry *)&destroy_resp,
903 					    sizeof(destroy_resp));
904 
905 	if (unlikely(ret && (ret != -ENODEV)))
906 		pr_err("failed to destroy io sq error: %d\n", ret);
907 
908 	return ret;
909 }
910 
911 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
912 				  struct ena_com_io_sq *io_sq,
913 				  struct ena_com_io_cq *io_cq)
914 {
915 	size_t size;
916 
917 	if (io_cq->cdesc_addr.virt_addr) {
918 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
919 
920 		dma_free_coherent(ena_dev->dmadev, size,
921 				  io_cq->cdesc_addr.virt_addr,
922 				  io_cq->cdesc_addr.phys_addr);
923 
924 		io_cq->cdesc_addr.virt_addr = NULL;
925 	}
926 
927 	if (io_sq->desc_addr.virt_addr) {
928 		size = io_sq->desc_entry_size * io_sq->q_depth;
929 
930 		dma_free_coherent(ena_dev->dmadev, size,
931 				  io_sq->desc_addr.virt_addr,
932 				  io_sq->desc_addr.phys_addr);
933 
934 		io_sq->desc_addr.virt_addr = NULL;
935 	}
936 
937 	if (io_sq->bounce_buf_ctrl.base_buffer) {
938 		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
939 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
940 	}
941 }
942 
943 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
944 				u16 exp_state)
945 {
946 	u32 val, i;
947 
948 	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
949 	timeout = (timeout * 100) / ENA_POLL_MS;
950 
951 	for (i = 0; i < timeout; i++) {
952 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
953 
954 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
955 			pr_err("Reg read timeout occurred\n");
956 			return -ETIME;
957 		}
958 
959 		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
960 			exp_state)
961 			return 0;
962 
963 		msleep(ENA_POLL_MS);
964 	}
965 
966 	return -ETIME;
967 }
968 
969 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
970 					       enum ena_admin_aq_feature_id feature_id)
971 {
972 	u32 feature_mask = 1 << feature_id;
973 
	/* Device attributes are always supported */
975 	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
976 	    !(ena_dev->supported_features & feature_mask))
977 		return false;
978 
979 	return true;
980 }
981 
982 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
983 				  struct ena_admin_get_feat_resp *get_resp,
984 				  enum ena_admin_aq_feature_id feature_id,
985 				  dma_addr_t control_buf_dma_addr,
986 				  u32 control_buff_size,
987 				  u8 feature_ver)
988 {
989 	struct ena_com_admin_queue *admin_queue;
990 	struct ena_admin_get_feat_cmd get_cmd;
991 	int ret;
992 
993 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
994 		pr_debug("Feature %d isn't supported\n", feature_id);
995 		return -EOPNOTSUPP;
996 	}
997 
998 	memset(&get_cmd, 0x0, sizeof(get_cmd));
999 	admin_queue = &ena_dev->admin_queue;
1000 
1001 	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1002 
1003 	if (control_buff_size)
1004 		get_cmd.aq_common_descriptor.flags =
1005 			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1006 	else
1007 		get_cmd.aq_common_descriptor.flags = 0;
1008 
1009 	ret = ena_com_mem_addr_set(ena_dev,
1010 				   &get_cmd.control_buffer.address,
1011 				   control_buf_dma_addr);
1012 	if (unlikely(ret)) {
1013 		pr_err("memory address set failed\n");
1014 		return ret;
1015 	}
1016 
1017 	get_cmd.control_buffer.length = control_buff_size;
1018 	get_cmd.feat_common.feature_version = feature_ver;
1019 	get_cmd.feat_common.feature_id = feature_id;
1020 
1021 	ret = ena_com_execute_admin_command(admin_queue,
1022 					    (struct ena_admin_aq_entry *)
1023 					    &get_cmd,
1024 					    sizeof(get_cmd),
1025 					    (struct ena_admin_acq_entry *)
1026 					    get_resp,
1027 					    sizeof(*get_resp));
1028 
1029 	if (unlikely(ret))
1030 		pr_err("Failed to submit get_feature command %d error: %d\n",
1031 		       feature_id, ret);
1032 
1033 	return ret;
1034 }
1035 
1036 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1037 			       struct ena_admin_get_feat_resp *get_resp,
1038 			       enum ena_admin_aq_feature_id feature_id,
1039 			       u8 feature_ver)
1040 {
1041 	return ena_com_get_feature_ex(ena_dev,
1042 				      get_resp,
1043 				      feature_id,
1044 				      0,
1045 				      0,
1046 				      feature_ver);
1047 }
1048 
1049 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1050 {
1051 	return ena_dev->rss.hash_func;
1052 }
1053 
1054 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1055 {
1056 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1057 		(ena_dev->rss).hash_key;
1058 
1059 	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
	/* The key is stored in the device as an array of u32 entries, and
	 * the API requires it to be passed in the same format. Thus the
	 * number of entries is the key size in bytes divided by sizeof(u32).
	 */
1064 	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
1065 }
1066 
1067 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1068 {
1069 	struct ena_rss *rss = &ena_dev->rss;
1070 	struct ena_admin_get_feat_resp get_resp;
1071 	int rc;
1072 
1073 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
1074 				    ENA_ADMIN_RSS_HASH_FUNCTION,
1075 				    ena_dev->rss.hash_key_dma_addr,
1076 				    sizeof(ena_dev->rss.hash_key), 0);
	if (unlikely(rc))
		return -EOPNOTSUPP;
1080 
1081 	rss->hash_key =
1082 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1083 				   &rss->hash_key_dma_addr, GFP_KERNEL);
1084 
1085 	if (unlikely(!rss->hash_key))
1086 		return -ENOMEM;
1087 
1088 	return 0;
1089 }
1090 
1091 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1092 {
1093 	struct ena_rss *rss = &ena_dev->rss;
1094 
1095 	if (rss->hash_key)
1096 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1097 				  rss->hash_key, rss->hash_key_dma_addr);
1098 	rss->hash_key = NULL;
1099 }
1100 
1101 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1102 {
1103 	struct ena_rss *rss = &ena_dev->rss;
1104 
1105 	rss->hash_ctrl =
1106 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1107 				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1108 
1109 	if (unlikely(!rss->hash_ctrl))
1110 		return -ENOMEM;
1111 
1112 	return 0;
1113 }
1114 
1115 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1116 {
1117 	struct ena_rss *rss = &ena_dev->rss;
1118 
1119 	if (rss->hash_ctrl)
1120 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1121 				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1122 	rss->hash_ctrl = NULL;
1123 }
1124 
1125 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1126 					   u16 log_size)
1127 {
1128 	struct ena_rss *rss = &ena_dev->rss;
1129 	struct ena_admin_get_feat_resp get_resp;
1130 	size_t tbl_size;
1131 	int ret;
1132 
1133 	ret = ena_com_get_feature(ena_dev, &get_resp,
1134 				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1135 	if (unlikely(ret))
1136 		return ret;
1137 
1138 	if ((get_resp.u.ind_table.min_size > log_size) ||
1139 	    (get_resp.u.ind_table.max_size < log_size)) {
1140 		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1141 		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
1142 		       1 << get_resp.u.ind_table.max_size);
1143 		return -EINVAL;
1144 	}
1145 
1146 	tbl_size = (1ULL << log_size) *
1147 		sizeof(struct ena_admin_rss_ind_table_entry);
1148 
1149 	rss->rss_ind_tbl =
1150 		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1151 				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1152 	if (unlikely(!rss->rss_ind_tbl))
1153 		goto mem_err1;
1154 
1155 	tbl_size = (1ULL << log_size) * sizeof(u16);
1156 	rss->host_rss_ind_tbl =
1157 		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1158 	if (unlikely(!rss->host_rss_ind_tbl))
1159 		goto mem_err2;
1160 
1161 	rss->tbl_log_size = log_size;
1162 
1163 	return 0;
1164 
1165 mem_err2:
1166 	tbl_size = (1ULL << log_size) *
1167 		sizeof(struct ena_admin_rss_ind_table_entry);
1168 
1169 	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1170 			  rss->rss_ind_tbl_dma_addr);
1171 	rss->rss_ind_tbl = NULL;
1172 mem_err1:
1173 	rss->tbl_log_size = 0;
1174 	return -ENOMEM;
1175 }
1176 
1177 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1178 {
1179 	struct ena_rss *rss = &ena_dev->rss;
1180 	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1181 		sizeof(struct ena_admin_rss_ind_table_entry);
1182 
1183 	if (rss->rss_ind_tbl)
1184 		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1185 				  rss->rss_ind_tbl_dma_addr);
1186 	rss->rss_ind_tbl = NULL;
1187 
1188 	if (rss->host_rss_ind_tbl)
1189 		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1190 	rss->host_rss_ind_tbl = NULL;
1191 }
1192 
1193 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1194 				struct ena_com_io_sq *io_sq, u16 cq_idx)
1195 {
1196 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1197 	struct ena_admin_aq_create_sq_cmd create_cmd;
1198 	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1199 	u8 direction;
1200 	int ret;
1201 
1202 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1203 
1204 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1205 
1206 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1207 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1208 	else
1209 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1210 
1211 	create_cmd.sq_identity |= (direction <<
1212 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1213 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1214 
1215 	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1216 		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1217 
1218 	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1219 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1220 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1221 
1222 	create_cmd.sq_caps_3 |=
1223 		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1224 
1225 	create_cmd.cq_idx = cq_idx;
1226 	create_cmd.sq_depth = io_sq->q_depth;
1227 
1228 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1229 		ret = ena_com_mem_addr_set(ena_dev,
1230 					   &create_cmd.sq_ba,
1231 					   io_sq->desc_addr.phys_addr);
1232 		if (unlikely(ret)) {
1233 			pr_err("memory address set failed\n");
1234 			return ret;
1235 		}
1236 	}
1237 
1238 	ret = ena_com_execute_admin_command(admin_queue,
1239 					    (struct ena_admin_aq_entry *)&create_cmd,
1240 					    sizeof(create_cmd),
1241 					    (struct ena_admin_acq_entry *)&cmd_completion,
1242 					    sizeof(cmd_completion));
1243 	if (unlikely(ret)) {
1244 		pr_err("Failed to create IO SQ. error: %d\n", ret);
1245 		return ret;
1246 	}
1247 
1248 	io_sq->idx = cmd_completion.sq_idx;
1249 
1250 	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1251 		(uintptr_t)cmd_completion.sq_doorbell_offset);
1252 
1253 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1254 		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1255 				+ cmd_completion.llq_headers_offset);
1256 
1257 		io_sq->desc_addr.pbuf_dev_addr =
1258 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1259 			cmd_completion.llq_descriptors_offset);
1260 	}
1261 
1262 	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1263 
1264 	return ret;
1265 }
1266 
1267 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1268 {
1269 	struct ena_rss *rss = &ena_dev->rss;
1270 	struct ena_com_io_sq *io_sq;
1271 	u16 qid;
1272 	int i;
1273 
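	/* Translate each host-side queue id in the table to the device-side
	 * index of its Rx submission queue.
	 */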
1274 	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1275 		qid = rss->host_rss_ind_tbl[i];
1276 		if (qid >= ENA_TOTAL_NUM_QUEUES)
1277 			return -EINVAL;
1278 
1279 		io_sq = &ena_dev->io_sq_queues[qid];
1280 
1281 		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1282 			return -EINVAL;
1283 
1284 		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1285 	}
1286 
1287 	return 0;
1288 }
1289 
1290 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1291 						 u16 intr_delay_resolution)
1292 {
1293 	/* Initial value of intr_delay_resolution might be 0 */
1294 	u16 prev_intr_delay_resolution =
1295 		ena_dev->intr_delay_resolution ?
1296 		ena_dev->intr_delay_resolution :
1297 		ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1298 
1299 	if (!intr_delay_resolution) {
1300 		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1301 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1302 	}
1303 
1304 	/* update Rx */
1305 	ena_dev->intr_moder_rx_interval =
1306 		ena_dev->intr_moder_rx_interval *
1307 		prev_intr_delay_resolution /
1308 		intr_delay_resolution;
1309 
1310 	/* update Tx */
1311 	ena_dev->intr_moder_tx_interval =
1312 		ena_dev->intr_moder_tx_interval *
1313 		prev_intr_delay_resolution /
1314 		intr_delay_resolution;
1315 
1316 	ena_dev->intr_delay_resolution = intr_delay_resolution;
1317 }
1318 
1319 /*****************************************************************************/
1320 /*******************************      API       ******************************/
1321 /*****************************************************************************/
1322 
1323 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1324 				  struct ena_admin_aq_entry *cmd,
1325 				  size_t cmd_size,
1326 				  struct ena_admin_acq_entry *comp,
1327 				  size_t comp_size)
1328 {
1329 	struct ena_comp_ctx *comp_ctx;
1330 	int ret;
1331 
1332 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1333 					    comp, comp_size);
1334 	if (IS_ERR(comp_ctx)) {
1335 		if (comp_ctx == ERR_PTR(-ENODEV))
1336 			pr_debug("Failed to submit command [%ld]\n",
1337 				 PTR_ERR(comp_ctx));
1338 		else
1339 			pr_err("Failed to submit command [%ld]\n",
1340 			       PTR_ERR(comp_ctx));
1341 
1342 		return PTR_ERR(comp_ctx);
1343 	}
1344 
1345 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1346 	if (unlikely(ret)) {
1347 		if (admin_queue->running_state)
1348 			pr_err("Failed to process command. ret = %d\n", ret);
1349 		else
1350 			pr_debug("Failed to process command. ret = %d\n", ret);
1351 	}
1352 	return ret;
1353 }
1354 
1355 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1356 			 struct ena_com_io_cq *io_cq)
1357 {
1358 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1359 	struct ena_admin_aq_create_cq_cmd create_cmd;
1360 	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1361 	int ret;
1362 
1363 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1364 
1365 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1366 
1367 	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1368 		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1369 	create_cmd.cq_caps_1 |=
1370 		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1371 
1372 	create_cmd.msix_vector = io_cq->msix_vector;
1373 	create_cmd.cq_depth = io_cq->q_depth;
1374 
1375 	ret = ena_com_mem_addr_set(ena_dev,
1376 				   &create_cmd.cq_ba,
1377 				   io_cq->cdesc_addr.phys_addr);
1378 	if (unlikely(ret)) {
1379 		pr_err("memory address set failed\n");
1380 		return ret;
1381 	}
1382 
1383 	ret = ena_com_execute_admin_command(admin_queue,
1384 					    (struct ena_admin_aq_entry *)&create_cmd,
1385 					    sizeof(create_cmd),
1386 					    (struct ena_admin_acq_entry *)&cmd_completion,
1387 					    sizeof(cmd_completion));
1388 	if (unlikely(ret)) {
1389 		pr_err("Failed to create IO CQ. error: %d\n", ret);
1390 		return ret;
1391 	}
1392 
1393 	io_cq->idx = cmd_completion.cq_idx;
1394 
1395 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1396 		cmd_completion.cq_interrupt_unmask_register_offset);
1397 
1398 	if (cmd_completion.cq_head_db_register_offset)
1399 		io_cq->cq_head_db_reg =
1400 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1401 			cmd_completion.cq_head_db_register_offset);
1402 
1403 	if (cmd_completion.numa_node_register_offset)
1404 		io_cq->numa_node_cfg_reg =
1405 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1406 			cmd_completion.numa_node_register_offset);
1407 
1408 	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1409 
1410 	return ret;
1411 }
1412 
1413 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1414 			    struct ena_com_io_sq **io_sq,
1415 			    struct ena_com_io_cq **io_cq)
1416 {
1417 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1418 		pr_err("Invalid queue number %d but the max is %d\n", qid,
1419 		       ENA_TOTAL_NUM_QUEUES);
1420 		return -EINVAL;
1421 	}
1422 
1423 	*io_sq = &ena_dev->io_sq_queues[qid];
1424 	*io_cq = &ena_dev->io_cq_queues[qid];
1425 
1426 	return 0;
1427 }
1428 
1429 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1430 {
1431 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1432 	struct ena_comp_ctx *comp_ctx;
1433 	u16 i;
1434 
1435 	if (!admin_queue->comp_ctx)
1436 		return;
1437 
1438 	for (i = 0; i < admin_queue->q_depth; i++) {
1439 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1440 		if (unlikely(!comp_ctx))
1441 			break;
1442 
1443 		comp_ctx->status = ENA_CMD_ABORTED;
1444 
1445 		complete(&comp_ctx->wait_event);
1446 	}
1447 }
1448 
1449 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1450 {
1451 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1452 	unsigned long flags = 0;
1453 
1454 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1455 	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1456 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1457 		msleep(ENA_POLL_MS);
1458 		spin_lock_irqsave(&admin_queue->q_lock, flags);
1459 	}
1460 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1461 }
1462 
1463 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1464 			  struct ena_com_io_cq *io_cq)
1465 {
1466 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1467 	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1468 	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1469 	int ret;
1470 
1471 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1472 
1473 	destroy_cmd.cq_idx = io_cq->idx;
1474 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1475 
1476 	ret = ena_com_execute_admin_command(admin_queue,
1477 					    (struct ena_admin_aq_entry *)&destroy_cmd,
1478 					    sizeof(destroy_cmd),
1479 					    (struct ena_admin_acq_entry *)&destroy_resp,
1480 					    sizeof(destroy_resp));
1481 
1482 	if (unlikely(ret && (ret != -ENODEV)))
1483 		pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1484 
1485 	return ret;
1486 }
1487 
1488 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1489 {
1490 	return ena_dev->admin_queue.running_state;
1491 }
1492 
1493 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1494 {
1495 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1496 	unsigned long flags = 0;
1497 
1498 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1499 	ena_dev->admin_queue.running_state = state;
1500 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1501 }
1502 
1503 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1504 {
1505 	u16 depth = ena_dev->aenq.q_depth;
1506 
1507 	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1508 
1509 	/* Init head_db to mark that all entries in the queue
1510 	 * are initially available
1511 	 */
1512 	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1513 }
1514 
1515 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1516 {
1517 	struct ena_com_admin_queue *admin_queue;
1518 	struct ena_admin_set_feat_cmd cmd;
1519 	struct ena_admin_set_feat_resp resp;
1520 	struct ena_admin_get_feat_resp get_resp;
1521 	int ret;
1522 
1523 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1524 	if (ret) {
1525 		pr_info("Can't get aenq configuration\n");
1526 		return ret;
1527 	}
1528 
1529 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1530 		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1531 			get_resp.u.aenq.supported_groups, groups_flag);
1532 		return -EOPNOTSUPP;
1533 	}
1534 
1535 	memset(&cmd, 0x0, sizeof(cmd));
1536 	admin_queue = &ena_dev->admin_queue;
1537 
1538 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1539 	cmd.aq_common_descriptor.flags = 0;
1540 	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1541 	cmd.u.aenq.enabled_groups = groups_flag;
1542 
1543 	ret = ena_com_execute_admin_command(admin_queue,
1544 					    (struct ena_admin_aq_entry *)&cmd,
1545 					    sizeof(cmd),
1546 					    (struct ena_admin_acq_entry *)&resp,
1547 					    sizeof(resp));
1548 
1549 	if (unlikely(ret))
1550 		pr_err("Failed to config AENQ ret: %d\n", ret);
1551 
1552 	return ret;
1553 }
1554 
1555 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1556 {
1557 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1558 	int width;
1559 
1560 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1561 		pr_err("Reg read timeout occurred\n");
1562 		return -ETIME;
1563 	}
1564 
1565 	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1566 		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1567 
1568 	pr_debug("ENA dma width: %d\n", width);
1569 
1570 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1571 		pr_err("DMA width illegal value: %d\n", width);
1572 		return -EINVAL;
1573 	}
1574 
1575 	ena_dev->dma_addr_bits = width;
1576 
1577 	return width;
1578 }
1579 
1580 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1581 {
1582 	u32 ver;
1583 	u32 ctrl_ver;
1584 	u32 ctrl_ver_masked;
1585 
	/* Make sure the ENA version and the controller version are at least
	 * as recent as the driver expects
	 */
1589 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1590 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1591 					  ENA_REGS_CONTROLLER_VERSION_OFF);
1592 
1593 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1594 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1595 		pr_err("Reg read timeout occurred\n");
1596 		return -ETIME;
1597 	}
1598 
1599 	pr_info("ena device version: %d.%d\n",
1600 		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1601 			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1602 		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1603 
1604 	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1605 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1606 			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1607 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1608 			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1609 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1610 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1611 			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1612 
1613 	ctrl_ver_masked =
1614 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1615 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1616 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1617 
1618 	/* Validate the ctrl version without the implementation ID */
1619 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1620 		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1621 		return -1;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1628 {
1629 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1630 	struct ena_com_admin_cq *cq = &admin_queue->cq;
1631 	struct ena_com_admin_sq *sq = &admin_queue->sq;
1632 	struct ena_com_aenq *aenq = &ena_dev->aenq;
1633 	u16 size;
1634 
1635 	if (admin_queue->comp_ctx)
1636 		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1637 	admin_queue->comp_ctx = NULL;
1638 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1639 	if (sq->entries)
1640 		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1641 				  sq->dma_addr);
1642 	sq->entries = NULL;
1643 
1644 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1645 	if (cq->entries)
1646 		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1647 				  cq->dma_addr);
1648 	cq->entries = NULL;
1649 
1650 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1651 	if (ena_dev->aenq.entries)
1652 		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1653 				  aenq->dma_addr);
1654 	aenq->entries = NULL;
1655 }
1656 
1657 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1658 {
1659 	u32 mask_value = 0;
1660 
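	/* In polling mode, mask the admin interrupt so completions are
	 * consumed only by the polling loop.
	 */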
1661 	if (polling)
1662 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1663 
1664 	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1665 	ena_dev->admin_queue.polling = polling;
1666 }
1667 
1668 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1669 					 bool polling)
1670 {
1671 	ena_dev->admin_queue.auto_polling = polling;
1672 }
1673 
1674 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1675 {
1676 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1677 
1678 	spin_lock_init(&mmio_read->lock);
1679 	mmio_read->read_resp =
1680 		dma_alloc_coherent(ena_dev->dmadev,
1681 				   sizeof(*mmio_read->read_resp),
1682 				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1683 	if (unlikely(!mmio_read->read_resp))
1684 		goto err;
1685 
1686 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1687 
1688 	mmio_read->read_resp->req_id = 0x0;
1689 	mmio_read->seq_num = 0x0;
1690 	mmio_read->readless_supported = true;
1691 
1692 	return 0;
1693 
1694 err:
1695 
1696 	return -ENOMEM;
1697 }
1698 
1699 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1700 {
1701 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1702 
1703 	mmio_read->readless_supported = readless_supported;
1704 }
1705 
1706 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1707 {
1708 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1709 
1710 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1711 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1712 
1713 	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1714 			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1715 
1716 	mmio_read->read_resp = NULL;
1717 }
1718 
1719 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1720 {
1721 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1722 	u32 addr_low, addr_high;
1723 
1724 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1725 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1726 
1727 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1728 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1729 }
1730 
1731 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1732 		       struct ena_aenq_handlers *aenq_handlers)
1733 {
1734 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1735 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1736 	int ret;
1737 
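	/* Bring-up order: verify that the device is ready, allocate the
	 * admin SQ/CQ and completion contexts, program their DMA addresses
	 * and capabilities into the device, and finally set up the AENQ.
	 */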
1738 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1739 
1740 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1741 		pr_err("Reg read timeout occurred\n");
1742 		return -ETIME;
1743 	}
1744 
1745 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1746 		pr_err("Device isn't ready, abort com init\n");
1747 		return -ENODEV;
1748 	}
1749 
1750 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1751 
1752 	admin_queue->q_dmadev = ena_dev->dmadev;
1753 	admin_queue->polling = false;
1754 	admin_queue->curr_cmd_id = 0;
1755 
1756 	atomic_set(&admin_queue->outstanding_cmds, 0);
1757 
1758 	spin_lock_init(&admin_queue->q_lock);
1759 
1760 	ret = ena_com_init_comp_ctxt(admin_queue);
1761 	if (ret)
1762 		goto error;
1763 
1764 	ret = ena_com_admin_init_sq(admin_queue);
1765 	if (ret)
1766 		goto error;
1767 
1768 	ret = ena_com_admin_init_cq(admin_queue);
1769 	if (ret)
1770 		goto error;
1771 
1772 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1773 		ENA_REGS_AQ_DB_OFF);
1774 
1775 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1776 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1777 
1778 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1779 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1780 
1781 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1782 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1783 
1784 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1785 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1786 
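	/* Advertise the admin queue depth and entry sizes to the device
	 * through the AQ/ACQ caps registers.
	 */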
1787 	aq_caps = 0;
1788 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1789 	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1790 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1791 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1792 
1793 	acq_caps = 0;
1794 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1795 	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1796 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1797 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1798 
1799 	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1800 	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1801 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1802 	if (ret)
1803 		goto error;
1804 
1805 	admin_queue->running_state = true;
1806 
1807 	return 0;
1808 error:
1809 	ena_com_admin_destroy(ena_dev);
1810 
1811 	return ret;
1812 }
1813 
1814 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1815 			    struct ena_com_create_io_ctx *ctx)
1816 {
1817 	struct ena_com_io_sq *io_sq;
1818 	struct ena_com_io_cq *io_cq;
1819 	int ret;
1820 
1821 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1822 		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1823 		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
1824 		return -EINVAL;
1825 	}
1826 
1827 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1828 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1829 
1830 	memset(io_sq, 0x0, sizeof(*io_sq));
1831 	memset(io_cq, 0x0, sizeof(*io_cq));
1832 
1833 	/* Init CQ */
1834 	io_cq->q_depth = ctx->queue_size;
1835 	io_cq->direction = ctx->direction;
1836 	io_cq->qid = ctx->qid;
1837 
1838 	io_cq->msix_vector = ctx->msix_vector;
1839 
1840 	io_sq->q_depth = ctx->queue_size;
1841 	io_sq->direction = ctx->direction;
1842 	io_sq->qid = ctx->qid;
1843 
1844 	io_sq->mem_queue_type = ctx->mem_queue_type;
1845 
1846 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1847 		/* header length is limited to 8 bits */
1848 		io_sq->tx_max_header_size =
1849 			min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1850 
1851 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1852 	if (ret)
1853 		goto error;
1854 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1855 	if (ret)
1856 		goto error;
1857 
1858 	ret = ena_com_create_io_cq(ena_dev, io_cq);
1859 	if (ret)
1860 		goto error;
1861 
1862 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1863 	if (ret)
1864 		goto destroy_io_cq;
1865 
1866 	return 0;
1867 
1868 destroy_io_cq:
1869 	ena_com_destroy_io_cq(ena_dev, io_cq);
1870 error:
1871 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1872 	return ret;
1873 }
1874 
1875 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1876 {
1877 	struct ena_com_io_sq *io_sq;
1878 	struct ena_com_io_cq *io_cq;
1879 
1880 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1881 		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1882 		       ENA_TOTAL_NUM_QUEUES);
1883 		return;
1884 	}
1885 
1886 	io_sq = &ena_dev->io_sq_queues[qid];
1887 	io_cq = &ena_dev->io_cq_queues[qid];
1888 
1889 	ena_com_destroy_io_sq(ena_dev, io_sq);
1890 	ena_com_destroy_io_cq(ena_dev, io_cq);
1891 
1892 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1893 }
1894 
1895 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1896 			    struct ena_admin_get_feat_resp *resp)
1897 {
1898 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1899 }
1900 
1901 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1902 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
1903 {
1904 	struct ena_admin_get_feat_resp get_resp;
1905 	int rc;
1906 
1907 	rc = ena_com_get_feature(ena_dev, &get_resp,
1908 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1909 	if (rc)
1910 		return rc;
1911 
1912 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1913 	       sizeof(get_resp.u.dev_attr));
1914 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1915 
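	/* Newer devices report their queue limits through the extended
	 * MAX_QUEUES_EXT feature; older devices expose only the legacy
	 * MAX_QUEUES_NUM feature.
	 */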
1916 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1917 		rc = ena_com_get_feature(ena_dev, &get_resp,
1918 					 ENA_ADMIN_MAX_QUEUES_EXT,
1919 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1920 		if (rc)
1921 			return rc;
1922 
1923 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1924 			return -EINVAL;
1925 
1926 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1927 		       sizeof(get_resp.u.max_queue_ext));
1928 		ena_dev->tx_max_header_size =
1929 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		if (rc)
			return rc;

		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;
	}
1941 
1942 	rc = ena_com_get_feature(ena_dev, &get_resp,
1943 				 ENA_ADMIN_AENQ_CONFIG, 0);
1944 	if (rc)
1945 		return rc;
1946 
1947 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1948 	       sizeof(get_resp.u.aenq));
1949 
1950 	rc = ena_com_get_feature(ena_dev, &get_resp,
1951 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1952 	if (rc)
1953 		return rc;
1954 
1955 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1956 	       sizeof(get_resp.u.offload));
1957 
	/* Driver hints is not a mandatory admin command, so if the command
	 * isn't supported, set the driver hints to 0.
	 */
1961 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1962 
1963 	if (!rc)
1964 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1965 		       sizeof(get_resp.u.hw_hints));
1966 	else if (rc == -EOPNOTSUPP)
1967 		memset(&get_feat_ctx->hw_hints, 0x0,
1968 		       sizeof(get_feat_ctx->hw_hints));
1969 	else
1970 		return rc;
1971 
1972 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1973 	if (!rc)
1974 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1975 		       sizeof(get_resp.u.llq));
1976 	else if (rc == -EOPNOTSUPP)
1977 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1978 	else
1979 		return rc;
1980 
1981 	return 0;
1982 }
1983 
1984 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1985 {
1986 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
1987 }
1988 
/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
1992 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1993 						     u16 group)
1994 {
1995 	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1996 
1997 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1998 		return aenq_handlers->handlers[group];
1999 
2000 	return aenq_handlers->unimplemented_handler;
2001 }
2002 
/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events.
 * pop events from the queue and apply the specific handler
 */
2007 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2008 {
2009 	struct ena_admin_aenq_entry *aenq_e;
2010 	struct ena_admin_aenq_common_desc *aenq_common;
2011 	struct ena_com_aenq *aenq  = &dev->aenq;
2012 	unsigned long long timestamp;
2013 	ena_aenq_handler handler_cb;
2014 	u16 masked_head, processed = 0;
2015 	u8 phase;
2016 
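	/* The AENQ depth is a power of two, so masking the head yields the
	 * ring index; the expected phase bit flips on every wrap-around and
	 * distinguishes device-owned from driver-owned entries.
	 */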
2017 	masked_head = aenq->head & (aenq->q_depth - 1);
2018 	phase = aenq->phase;
2019 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2020 	aenq_common = &aenq_e->aenq_common_desc;
2021 
2022 	/* Go over all the events */
2023 	while ((READ_ONCE(aenq_common->flags) &
2024 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2025 		/* Make sure the phase bit (ownership) is as expected before
2026 		 * reading the rest of the descriptor.
2027 		 */
2028 		dma_rmb();
2029 
2030 		timestamp =
2031 			(unsigned long long)aenq_common->timestamp_low |
2032 			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);
2035 
		/* Handle specific event */
2037 		handler_cb = ena_com_get_specific_aenq_cb(dev,
2038 							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */
2040 
2041 		/* Get next event entry */
2042 		masked_head++;
2043 		processed++;
2044 
2045 		if (unlikely(masked_head == aenq->q_depth)) {
2046 			masked_head = 0;
2047 			phase = !phase;
2048 		}
2049 		aenq_e = &aenq->entries[masked_head];
2050 		aenq_common = &aenq_e->aenq_common_desc;
2051 	}
2052 
2053 	aenq->head += processed;
2054 	aenq->phase = phase;
2055 
2056 	/* Don't update aenq doorbell if there weren't any processed events */
2057 	if (!processed)
2058 		return;
2059 
2060 	/* write the aenq doorbell after all AENQ descriptors were read */
2061 	mb();
2062 	writel_relaxed((u32)aenq->head,
2063 		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2064 }
2065 
2066 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2067 		      enum ena_regs_reset_reason_types reset_reason)
2068 {
2069 	u32 stat, timeout, cap, reset_val;
2070 	int rc;
2071 
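	/* Reset handshake: set the reset bit (together with the reset
	 * reason) in DEV_CTL, wait for the device to report
	 * RESET_IN_PROGRESS, clear DEV_CTL, then wait for the indication
	 * to drop again.
	 */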
2072 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2073 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2074 
2075 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2076 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
2077 		pr_err("Reg read32 timeout occurred\n");
2078 		return -ETIME;
2079 	}
2080 
2081 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2082 		pr_err("Device isn't ready, can't reset device\n");
2083 		return -EINVAL;
2084 	}
2085 
2086 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2087 			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2088 	if (timeout == 0) {
2089 		pr_err("Invalid timeout value\n");
2090 		return -EINVAL;
2091 	}
2092 
2093 	/* start reset */
2094 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2095 	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2096 		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2097 	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2098 
	/* The reset clears the MMIO response address registers, so the
	 * read request address must be programmed again.
	 */
2100 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2101 
2102 	rc = wait_for_reset_state(ena_dev, timeout,
2103 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2104 	if (rc != 0) {
2105 		pr_err("Reset indication didn't turn on\n");
2106 		return rc;
2107 	}
2108 
2109 	/* reset done */
2110 	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2111 	rc = wait_for_reset_state(ena_dev, timeout, 0);
2112 	if (rc != 0) {
2113 		pr_err("Reset indication didn't turn off\n");
2114 		return rc;
2115 	}
2116 
2117 	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2118 		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the timeout reg resolution is 100 ms (100000 us) */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2122 	else
2123 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2124 
2125 	return 0;
2126 }
2127 
2128 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2129 			     struct ena_com_stats_ctx *ctx,
2130 			     enum ena_admin_get_stats_type type)
2131 {
2132 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2133 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2134 	struct ena_com_admin_queue *admin_queue;
2135 	int ret;
2136 
2137 	admin_queue = &ena_dev->admin_queue;
2138 
2139 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2140 	get_cmd->aq_common_descriptor.flags = 0;
2141 	get_cmd->type = type;
2142 
	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));
2148 
2149 	if (unlikely(ret))
2150 		pr_err("Failed to get stats. error: %d\n", ret);
2151 
2152 	return ret;
2153 }
2154 
2155 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2156 				struct ena_admin_basic_stats *stats)
2157 {
2158 	struct ena_com_stats_ctx ctx;
2159 	int ret;
2160 
2161 	memset(&ctx, 0x0, sizeof(ctx));
2162 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2163 	if (likely(ret == 0))
2164 		memcpy(stats, &ctx.get_resp.basic_stats,
2165 		       sizeof(ctx.get_resp.basic_stats));
2166 
2167 	return ret;
2168 }
2169 
2170 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2171 {
2172 	struct ena_com_admin_queue *admin_queue;
2173 	struct ena_admin_set_feat_cmd cmd;
2174 	struct ena_admin_set_feat_resp resp;
2175 	int ret;
2176 
2177 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2178 		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2179 		return -EOPNOTSUPP;
2180 	}
2181 
2182 	memset(&cmd, 0x0, sizeof(cmd));
2183 	admin_queue = &ena_dev->admin_queue;
2184 
2185 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2186 	cmd.aq_common_descriptor.flags = 0;
2187 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2188 	cmd.u.mtu.mtu = mtu;
2189 
2190 	ret = ena_com_execute_admin_command(admin_queue,
2191 					    (struct ena_admin_aq_entry *)&cmd,
2192 					    sizeof(cmd),
2193 					    (struct ena_admin_acq_entry *)&resp,
2194 					    sizeof(resp));
2195 
2196 	if (unlikely(ret))
2197 		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2198 
2199 	return ret;
2200 }
2201 
2202 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2203 				 struct ena_admin_feature_offload_desc *offload)
2204 {
2205 	int ret;
2206 	struct ena_admin_get_feat_resp resp;
2207 
2208 	ret = ena_com_get_feature(ena_dev, &resp,
2209 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2210 	if (unlikely(ret)) {
2211 		pr_err("Failed to get offload capabilities %d\n", ret);
2212 		return ret;
2213 	}
2214 
2215 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2216 
2217 	return 0;
2218 }
2219 
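/* Program the previously chosen hash function and key into the device.
 * The requested function is validated against the device's supported_func
 * bitmap, and the key itself is passed by DMA address in the command's
 * control buffer.
 */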
2220 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2221 {
2222 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2223 	struct ena_rss *rss = &ena_dev->rss;
2224 	struct ena_admin_set_feat_cmd cmd;
2225 	struct ena_admin_set_feat_resp resp;
2226 	struct ena_admin_get_feat_resp get_resp;
2227 	int ret;
2228 
2229 	if (!ena_com_check_supported_feature_id(ena_dev,
2230 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
2231 		pr_debug("Feature %d isn't supported\n",
2232 			 ENA_ADMIN_RSS_HASH_FUNCTION);
2233 		return -EOPNOTSUPP;
2234 	}
2235 
2236 	/* Validate hash function is supported */
2237 	ret = ena_com_get_feature(ena_dev, &get_resp,
2238 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2239 	if (unlikely(ret))
2240 		return ret;
2241 
2242 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Hash function %d isn't supported by the device, abort\n",
		       rss->hash_func);
2245 		return -EOPNOTSUPP;
2246 	}
2247 
2248 	memset(&cmd, 0x0, sizeof(cmd));
2249 
2250 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2251 	cmd.aq_common_descriptor.flags =
2252 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2253 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2254 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2255 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2256 
2257 	ret = ena_com_mem_addr_set(ena_dev,
2258 				   &cmd.control_buffer.address,
2259 				   rss->hash_key_dma_addr);
2260 	if (unlikely(ret)) {
2261 		pr_err("memory address set failed\n");
2262 		return ret;
2263 	}
2264 
2265 	cmd.control_buffer.length = sizeof(*rss->hash_key);
2266 
2267 	ret = ena_com_execute_admin_command(admin_queue,
2268 					    (struct ena_admin_aq_entry *)&cmd,
2269 					    sizeof(cmd),
2270 					    (struct ena_admin_acq_entry *)&resp,
2271 					    sizeof(resp));
2272 	if (unlikely(ret)) {
2273 		pr_err("Failed to set hash function %d. error: %d\n",
2274 		       rss->hash_func, ret);
2275 		return -EINVAL;
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2282 			       enum ena_admin_hash_functions func,
2283 			       const u8 *key, u16 key_len, u32 init_val)
2284 {
2285 	struct ena_rss *rss = &ena_dev->rss;
2286 	struct ena_admin_get_feat_resp get_resp;
2287 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2288 		rss->hash_key;
2289 	int rc;
2290 
	/* Make sure the key size is a multiple of DWORDs */
2292 	if (unlikely(key_len & 0x3))
2293 		return -EINVAL;
2294 
2295 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2296 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2297 				    rss->hash_key_dma_addr,
2298 				    sizeof(*rss->hash_key), 0);
2299 	if (unlikely(rc))
2300 		return rc;
2301 
2302 	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2303 		pr_err("Flow hash function %d isn't supported\n", func);
2304 		return -EOPNOTSUPP;
2305 	}
2306 
2307 	switch (func) {
2308 	case ENA_ADMIN_TOEPLITZ:
2309 		if (key) {
2310 			if (key_len != sizeof(hash_key->key)) {
2311 				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2312 				       key_len, sizeof(hash_key->key));
2313 				return -EINVAL;
2314 			}
2315 			memcpy(hash_key->key, key, key_len);
2316 			rss->hash_init_val = init_val;
2317 			hash_key->keys_num = key_len >> 2;
2318 		}
2319 		break;
2320 	case ENA_ADMIN_CRC32:
2321 		rss->hash_init_val = init_val;
2322 		break;
2323 	default:
2324 		pr_err("Invalid hash function (%d)\n", func);
2325 		return -EINVAL;
2326 	}
2327 
2328 	rss->hash_func = func;
2329 	rc = ena_com_set_hash_function(ena_dev);
2330 
2331 	/* Restore the old function */
2332 	if (unlikely(rc))
2333 		ena_com_get_hash_function(ena_dev, NULL, NULL);
2334 
2335 	return rc;
2336 }
2337 
2338 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2339 			      enum ena_admin_hash_functions *func,
2340 			      u8 *key)
2341 {
2342 	struct ena_rss *rss = &ena_dev->rss;
2343 	struct ena_admin_get_feat_resp get_resp;
2344 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2345 		rss->hash_key;
2346 	int rc;
2347 
2348 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2349 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2350 				    rss->hash_key_dma_addr,
2351 				    sizeof(*rss->hash_key), 0);
2352 	if (unlikely(rc))
2353 		return rc;
2354 
2355 	/* ffs() returns 1 in case the lsb is set */
2356 	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2357 	if (rss->hash_func)
2358 		rss->hash_func--;
2359 
2360 	if (func)
2361 		*func = rss->hash_func;
2362 
2363 	if (key)
2364 		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2365 
2366 	return 0;
2367 }
2368 
2369 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2370 			  enum ena_admin_flow_hash_proto proto,
2371 			  u16 *fields)
2372 {
2373 	struct ena_rss *rss = &ena_dev->rss;
2374 	struct ena_admin_get_feat_resp get_resp;
2375 	int rc;
2376 
2377 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2378 				    ENA_ADMIN_RSS_HASH_INPUT,
2379 				    rss->hash_ctrl_dma_addr,
2380 				    sizeof(*rss->hash_ctrl), 0);
2381 	if (unlikely(rc))
2382 		return rc;
2383 
2384 	if (fields)
2385 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2386 
2387 	return 0;
2388 }
2389 
2390 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2391 {
2392 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2393 	struct ena_rss *rss = &ena_dev->rss;
2394 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2395 	struct ena_admin_set_feat_cmd cmd;
2396 	struct ena_admin_set_feat_resp resp;
2397 	int ret;
2398 
2399 	if (!ena_com_check_supported_feature_id(ena_dev,
2400 						ENA_ADMIN_RSS_HASH_INPUT)) {
2401 		pr_debug("Feature %d isn't supported\n",
2402 			 ENA_ADMIN_RSS_HASH_INPUT);
2403 		return -EOPNOTSUPP;
2404 	}
2405 
2406 	memset(&cmd, 0x0, sizeof(cmd));
2407 
2408 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2409 	cmd.aq_common_descriptor.flags =
2410 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2411 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2412 	cmd.u.flow_hash_input.enabled_input_sort =
2413 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2414 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2415 
2416 	ret = ena_com_mem_addr_set(ena_dev,
2417 				   &cmd.control_buffer.address,
2418 				   rss->hash_ctrl_dma_addr);
2419 	if (unlikely(ret)) {
2420 		pr_err("memory address set failed\n");
2421 		return ret;
2422 	}
2423 	cmd.control_buffer.length = sizeof(*hash_ctrl);
2424 
2425 	ret = ena_com_execute_admin_command(admin_queue,
2426 					    (struct ena_admin_aq_entry *)&cmd,
2427 					    sizeof(cmd),
2428 					    (struct ena_admin_acq_entry *)&resp,
2429 					    sizeof(resp));
2430 	if (unlikely(ret))
2431 		pr_err("Failed to set hash input. error: %d\n", ret);
2432 
2433 	return ret;
2434 }
2435 
2436 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2437 {
2438 	struct ena_rss *rss = &ena_dev->rss;
2439 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2440 		rss->hash_ctrl;
2441 	u16 available_fields = 0;
2442 	int rc, i;
2443 
2444 	/* Get the supported hash input */
2445 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2446 	if (unlikely(rc))
2447 		return rc;
2448 
2449 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2450 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2451 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2452 
2453 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2454 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2455 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2456 
2457 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2458 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2459 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2460 
2461 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2462 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2463 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2464 
2465 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2466 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2467 
2468 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2469 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2470 
2471 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2472 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2473 
2474 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2475 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2476 
2477 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2478 		available_fields = hash_ctrl->selected_fields[i].fields &
2479 				hash_ctrl->supported_fields[i].fields;
2480 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2482 			       i, hash_ctrl->supported_fields[i].fields,
2483 			       hash_ctrl->selected_fields[i].fields);
2484 			return -EOPNOTSUPP;
2485 		}
2486 	}
2487 
2488 	rc = ena_com_set_hash_ctrl(ena_dev);
2489 
2490 	/* In case of failure, restore the old hash ctrl */
2491 	if (unlikely(rc))
2492 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2493 
2494 	return rc;
2495 }
2496 
2497 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2498 			   enum ena_admin_flow_hash_proto proto,
2499 			   u16 hash_fields)
2500 {
2501 	struct ena_rss *rss = &ena_dev->rss;
2502 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2503 	u16 supported_fields;
2504 	int rc;
2505 
2506 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2507 		pr_err("Invalid proto num (%u)\n", proto);
2508 		return -EINVAL;
2509 	}
2510 
2511 	/* Get the ctrl table */
2512 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2513 	if (unlikely(rc))
2514 		return rc;
2515 
2516 	/* Make sure all the fields are supported */
2517 	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
		return -EOPNOTSUPP;
	}
2522 
2523 	hash_ctrl->selected_fields[proto].fields = hash_fields;
2524 
2525 	rc = ena_com_set_hash_ctrl(ena_dev);
2526 
2527 	/* In case of failure, restore the old hash ctrl */
2528 	if (unlikely(rc))
2529 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2530 
	return rc;
2532 }
2533 
2534 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2535 				      u16 entry_idx, u16 entry_value)
2536 {
2537 	struct ena_rss *rss = &ena_dev->rss;
2538 
2539 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2540 		return -EINVAL;
2541 
	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2543 		return -EINVAL;
2544 
2545 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2546 
2547 	return 0;
2548 }
2549 
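/* Push the host copy of the RSS indirection table to the device: the
 * table is first converted from host queue ids to device queue handles,
 * then handed to the device by DMA address via a SET_FEATURE command.
 */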
2550 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2551 {
2552 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2553 	struct ena_rss *rss = &ena_dev->rss;
2554 	struct ena_admin_set_feat_cmd cmd;
2555 	struct ena_admin_set_feat_resp resp;
2556 	int ret;
2557 
2558 	if (!ena_com_check_supported_feature_id(
2559 		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2560 		pr_debug("Feature %d isn't supported\n",
2561 			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2562 		return -EOPNOTSUPP;
2563 	}
2564 
2565 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2566 	if (ret) {
2567 		pr_err("Failed to convert host indirection table to device table\n");
2568 		return ret;
2569 	}
2570 
2571 	memset(&cmd, 0x0, sizeof(cmd));
2572 
2573 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2574 	cmd.aq_common_descriptor.flags =
2575 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2576 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2577 	cmd.u.ind_table.size = rss->tbl_log_size;
2578 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2579 
2580 	ret = ena_com_mem_addr_set(ena_dev,
2581 				   &cmd.control_buffer.address,
2582 				   rss->rss_ind_tbl_dma_addr);
2583 	if (unlikely(ret)) {
2584 		pr_err("memory address set failed\n");
2585 		return ret;
2586 	}
2587 
2588 	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2589 		sizeof(struct ena_admin_rss_ind_table_entry);
2590 
2591 	ret = ena_com_execute_admin_command(admin_queue,
2592 					    (struct ena_admin_aq_entry *)&cmd,
2593 					    sizeof(cmd),
2594 					    (struct ena_admin_acq_entry *)&resp,
2595 					    sizeof(resp));
2596 
2597 	if (unlikely(ret))
2598 		pr_err("Failed to set indirect table. error: %d\n", ret);
2599 
2600 	return ret;
2601 }
2602 
2603 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2604 {
2605 	struct ena_rss *rss = &ena_dev->rss;
2606 	struct ena_admin_get_feat_resp get_resp;
2607 	u32 tbl_size;
2608 	int i, rc;
2609 
2610 	tbl_size = (1ULL << rss->tbl_log_size) *
2611 		sizeof(struct ena_admin_rss_ind_table_entry);
2612 
2613 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2614 				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2615 				    rss->rss_ind_tbl_dma_addr,
2616 				    tbl_size, 0);
2617 	if (unlikely(rc))
2618 		return rc;
2619 
2620 	if (!ind_tbl)
2621 		return 0;
2622 
2623 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2624 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2625 
2626 	return 0;
2627 }
2628 
2629 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2630 {
2631 	int rc;
2632 
2633 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2634 
2635 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2636 	if (unlikely(rc))
2637 		goto err_indr_tbl;
2638 
2639 	/* The following function might return unsupported in case the
2640 	 * device doesn't support setting the key / hash function. We can safely
2641 	 * ignore this error and have indirection table support only.
2642 	 */
2643 	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != -EOPNOTSUPP)
		goto err_hash_key;
2648 
2649 	rc = ena_com_hash_ctrl_init(ena_dev);
2650 	if (unlikely(rc))
2651 		goto err_hash_ctrl;
2652 
2653 	return 0;
2654 
2655 err_hash_ctrl:
2656 	ena_com_hash_key_destroy(ena_dev);
2657 err_hash_key:
2658 	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
2662 }
2663 
2664 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2665 {
2666 	ena_com_indirect_table_destroy(ena_dev);
2667 	ena_com_hash_key_destroy(ena_dev);
2668 	ena_com_hash_ctrl_destroy(ena_dev);
2669 
2670 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2671 }
2672 
2673 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2674 {
2675 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2676 
2677 	host_attr->host_info =
2678 		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2679 				   &host_attr->host_info_dma_addr, GFP_KERNEL);
2680 	if (unlikely(!host_attr->host_info))
2681 		return -ENOMEM;
2682 
2683 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2684 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2685 		(ENA_COMMON_SPEC_VERSION_MINOR));
2686 
2687 	return 0;
2688 }
2689 
2690 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2691 				u32 debug_area_size)
2692 {
2693 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2694 
2695 	host_attr->debug_area_virt_addr =
2696 		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2697 				   &host_attr->debug_area_dma_addr,
2698 				   GFP_KERNEL);
2699 	if (unlikely(!host_attr->debug_area_virt_addr)) {
2700 		host_attr->debug_area_size = 0;
2701 		return -ENOMEM;
2702 	}
2703 
2704 	host_attr->debug_area_size = debug_area_size;
2705 
2706 	return 0;
2707 }
2708 
2709 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2710 {
2711 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2712 
2713 	if (host_attr->host_info) {
2714 		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2715 				  host_attr->host_info_dma_addr);
2716 		host_attr->host_info = NULL;
2717 	}
2718 }
2719 
2720 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2721 {
2722 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2723 
2724 	if (host_attr->debug_area_virt_addr) {
2725 		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2726 				  host_attr->debug_area_virt_addr,
2727 				  host_attr->debug_area_dma_addr);
2728 		host_attr->debug_area_virt_addr = NULL;
2729 	}
2730 }
2731 
2732 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2733 {
2734 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2735 	struct ena_com_admin_queue *admin_queue;
2736 	struct ena_admin_set_feat_cmd cmd;
2737 	struct ena_admin_set_feat_resp resp;
2738 
2739 	int ret;
2740 
2741 	/* Host attribute config is called before ena_com_get_dev_attr_feat
2742 	 * so ena_com can't check if the feature is supported.
2743 	 */
2744 
2745 	memset(&cmd, 0x0, sizeof(cmd));
2746 	admin_queue = &ena_dev->admin_queue;
2747 
2748 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2749 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2750 
2751 	ret = ena_com_mem_addr_set(ena_dev,
2752 				   &cmd.u.host_attr.debug_ba,
2753 				   host_attr->debug_area_dma_addr);
2754 	if (unlikely(ret)) {
2755 		pr_err("memory address set failed\n");
2756 		return ret;
2757 	}
2758 
2759 	ret = ena_com_mem_addr_set(ena_dev,
2760 				   &cmd.u.host_attr.os_info_ba,
2761 				   host_attr->host_info_dma_addr);
2762 	if (unlikely(ret)) {
2763 		pr_err("memory address set failed\n");
2764 		return ret;
2765 	}
2766 
2767 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2768 
2769 	ret = ena_com_execute_admin_command(admin_queue,
2770 					    (struct ena_admin_aq_entry *)&cmd,
2771 					    sizeof(cmd),
2772 					    (struct ena_admin_acq_entry *)&resp,
2773 					    sizeof(resp));
2774 
2775 	if (unlikely(ret))
2776 		pr_err("Failed to set host attributes: %d\n", ret);
2777 
2778 	return ret;
2779 }
2780 
2781 /* Interrupt moderation */
2782 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2783 {
2784 	return ena_com_check_supported_feature_id(ena_dev,
2785 						  ENA_ADMIN_INTERRUPT_MODERATION);
2786 }
2787 
2788 static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2789 							  u32 intr_delay_resolution,
2790 							  u32 *intr_moder_interval)
2791 {
2792 	if (!intr_delay_resolution) {
2793 		pr_err("Illegal interrupt delay granularity value\n");
2794 		return -EFAULT;
2795 	}
2796 
2797 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2798 
2799 	return 0;
2800 }
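/* Illustrative example (values are hypothetical): with a device-reported
 * interrupt delay resolution of 2 us, a requested 64 us coalescing
 * interval is programmed as 64 / 2 = 32 device units.
 */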
2801 
2802 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2803 						      u32 tx_coalesce_usecs)
2804 {
2805 	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2806 							      ena_dev->intr_delay_resolution,
2807 							      &ena_dev->intr_moder_tx_interval);
2808 }
2809 
2810 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2811 						      u32 rx_coalesce_usecs)
2812 {
2813 	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2814 							      ena_dev->intr_delay_resolution,
2815 							      &ena_dev->intr_moder_rx_interval);
2816 }
2817 
2818 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2819 {
2820 	struct ena_admin_get_feat_resp get_resp;
2821 	u16 delay_resolution;
2822 	int rc;
2823 
2824 	rc = ena_com_get_feature(ena_dev, &get_resp,
2825 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2826 
2827 	if (rc) {
2828 		if (rc == -EOPNOTSUPP) {
2829 			pr_debug("Feature %d isn't supported\n",
2830 				 ENA_ADMIN_INTERRUPT_MODERATION);
2831 			rc = 0;
2832 		} else {
2833 			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2834 			       rc);
2835 		}
2836 
2837 		/* no moderation supported, disable adaptive support */
2838 		ena_com_disable_adaptive_moderation(ena_dev);
2839 		return rc;
2840 	}
2841 
	/* Moderation is supported; store the device's delay resolution */
2843 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2844 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2845 
2846 	/* Disable adaptive moderation by default - can be enabled later */
2847 	ena_com_disable_adaptive_moderation(ena_dev);
2848 
2849 	return 0;
2850 }
2851 
2852 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2853 {
2854 	return ena_dev->intr_moder_tx_interval;
2855 }
2856 
2857 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2858 {
2859 	return ena_dev->intr_moder_rx_interval;
2860 }
2861 
2862 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2863 			    struct ena_admin_feature_llq_desc *llq_features,
2864 			    struct ena_llq_configurations *llq_default_cfg)
2865 {
2866 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2867 	int rc;
2868 
2869 	if (!llq_features->max_llq_num) {
2870 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2871 		return 0;
2872 	}
2873 
2874 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2875 	if (rc)
2876 		return rc;
2877 
2878 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2879 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
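	/* Illustrative example (sizes are hypothetical): a 128-byte LLQ
	 * entry with two 16-byte Tx descriptors placed before the header
	 * leaves 128 - 2 * 16 = 96 bytes for the pushed packet header.
	 */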
2880 
2881 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
2882 		pr_err("the size of the LLQ entry is smaller than needed\n");
2883 		return -EINVAL;
2884 	}
2885 
2886 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2887 
2888 	return 0;
2889 }
2890