1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
6 #include "ena_com.h"
7 
8 /*****************************************************************************/
9 /*****************************************************************************/
10 
11 /* Admin command timeout in microseconds */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
13 
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
16 
17 
18 #define ENA_CTRL_MAJOR		0
19 #define ENA_CTRL_MINOR		0
20 #define ENA_CTRL_SUB_MINOR	1
21 
22 #define MIN_ENA_CTRL_VER \
23 	(((ENA_CTRL_MAJOR) << \
24 	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25 	((ENA_CTRL_MINOR) << \
26 	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
27 	(ENA_CTRL_SUB_MINOR))
28 
29 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
30 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
31 
32 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
33 
34 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
35 
36 #define ENA_REGS_ADMIN_INTR_MASK 1
37 
38 #define ENA_MIN_ADMIN_POLL_US 100
39 
40 #define ENA_MAX_ADMIN_POLL_US 5000
41 
42 /*****************************************************************************/
43 /*****************************************************************************/
44 /*****************************************************************************/
45 
46 enum ena_cmd_status {
47 	ENA_CMD_SUBMITTED,
48 	ENA_CMD_COMPLETED,
49 	/* Abort - canceled by the driver */
50 	ENA_CMD_ABORTED,
51 };
52 
53 struct ena_comp_ctx {
54 	struct completion wait_event;
55 	struct ena_admin_acq_entry *user_cqe;
56 	u32 comp_size;
57 	enum ena_cmd_status status;
58 	/* status from the device */
59 	u8 comp_status;
60 	u8 cmd_opcode;
61 	bool occupied;
62 };
63 
64 struct ena_com_stats_ctx {
65 	struct ena_admin_aq_get_stats_cmd get_cmd;
66 	struct ena_admin_acq_get_stats_resp get_resp;
67 };
68 
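/* Split a DMA address into the low/high parts of an ena_common_mem_addr.
 * Fails with -EINVAL if the address doesn't fit within the DMA address
 * width reported by the device. For example, with 48 supported address
 * bits, addr 0x1_2345_6789 is encoded as low = 0x23456789, high = 0x1.
 */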
69 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
70 				       struct ena_common_mem_addr *ena_addr,
71 				       dma_addr_t addr)
72 {
73 	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
74 		pr_err("DMA address has more bits than the device supports\n");
75 		return -EINVAL;
76 	}
77 
78 	ena_addr->mem_addr_low = lower_32_bits(addr);
79 	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
80 
81 	return 0;
82 }
83 
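/* Allocate the admin submission queue ring (coherent DMA memory) and reset
 * its head/tail/phase state. The doorbell address is set to NULL here and
 * filled in later by ena_com_admin_init().
 */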
84 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
85 {
86 	struct ena_com_admin_sq *sq = &admin_queue->sq;
87 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
88 
89 	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
90 					 &sq->dma_addr, GFP_KERNEL);
91 
92 	if (!sq->entries) {
93 		pr_err("Memory allocation failed\n");
94 		return -ENOMEM;
95 	}
96 
97 	sq->head = 0;
98 	sq->tail = 0;
99 	sq->phase = 1;
100 
101 	sq->db_addr = NULL;
102 
103 	return 0;
104 }
105 
106 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
107 {
108 	struct ena_com_admin_cq *cq = &admin_queue->cq;
109 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
110 
111 	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
112 					 &cq->dma_addr, GFP_KERNEL);
113 
114 	if (!cq->entries) {
115 		pr_err("Memory allocation failed\n");
116 		return -ENOMEM;
117 	}
118 
119 	cq->head = 0;
120 	cq->phase = 1;
121 
122 	return 0;
123 }
124 
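/* Allocate the asynchronous event notification queue (AENQ), program its
 * base address and capabilities (depth and entry size) into the device
 * registers, and register the caller-supplied event handlers.
 */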
125 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
126 				   struct ena_aenq_handlers *aenq_handlers)
127 {
128 	struct ena_com_aenq *aenq = &ena_dev->aenq;
129 	u32 addr_low, addr_high, aenq_caps;
130 	u16 size;
131 
132 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
133 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
134 	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
135 					   &aenq->dma_addr, GFP_KERNEL);
136 
137 	if (!aenq->entries) {
138 		pr_err("Memory allocation failed\n");
139 		return -ENOMEM;
140 	}
141 
142 	aenq->head = aenq->q_depth;
143 	aenq->phase = 1;
144 
145 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
146 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
147 
148 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
149 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
150 
151 	aenq_caps = 0;
152 	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
153 	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
154 		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
155 		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
156 	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
157 
158 	if (unlikely(!aenq_handlers)) {
159 		pr_err("AENQ handlers pointer is NULL\n");
160 		return -EINVAL;
161 	}
162 
163 	aenq->aenq_handlers = aenq_handlers;
164 
165 	return 0;
166 }
167 
168 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
169 				     struct ena_comp_ctx *comp_ctx)
170 {
171 	comp_ctx->occupied = false;
172 	atomic_dec(&queue->outstanding_cmds);
173 }
174 
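/* Return the completion context associated with @command_id, or NULL if
 * the id is out of range or the context array isn't allocated. When
 * @capture is true the context is claimed for a new command: it is marked
 * occupied and the outstanding command counter is incremented.
 */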
175 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
176 					  u16 command_id, bool capture)
177 {
178 	if (unlikely(command_id >= admin_queue->q_depth)) {
179 		pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
180 		       command_id, admin_queue->q_depth);
181 		return NULL;
182 	}
183 
184 	if (unlikely(!admin_queue->comp_ctx)) {
185 		pr_err("Completion context is NULL\n");
186 		return NULL;
187 	}
188 
189 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
190 		pr_err("Completion context is occupied\n");
191 		return NULL;
192 	}
193 
194 	if (capture) {
195 		atomic_inc(&admin_queue->outstanding_cmds);
196 		admin_queue->comp_ctx[command_id].occupied = true;
197 	}
198 
199 	return &admin_queue->comp_ctx[command_id];
200 }
201 
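/* Place an admin command in the submission queue and ring the SQ doorbell.
 * Returns the completion context that tracks the command, or an ERR_PTR on
 * failure (e.g. -ENOSPC when the queue is full). Called with the admin
 * queue lock held by ena_com_submit_admin_cmd().
 */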
202 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
203 						       struct ena_admin_aq_entry *cmd,
204 						       size_t cmd_size_in_bytes,
205 						       struct ena_admin_acq_entry *comp,
206 						       size_t comp_size_in_bytes)
207 {
208 	struct ena_comp_ctx *comp_ctx;
209 	u16 tail_masked, cmd_id;
210 	u16 queue_size_mask;
211 	u16 cnt;
212 
213 	queue_size_mask = admin_queue->q_depth - 1;
214 
215 	tail_masked = admin_queue->sq.tail & queue_size_mask;
216 
217 	/* In case of queue FULL */
218 	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
219 	if (cnt >= admin_queue->q_depth) {
220 		pr_debug("Admin queue is full.\n");
221 		admin_queue->stats.out_of_space++;
222 		return ERR_PTR(-ENOSPC);
223 	}
224 
225 	cmd_id = admin_queue->curr_cmd_id;
226 
227 	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
228 		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
229 
230 	cmd->aq_common_descriptor.command_id |= cmd_id &
231 		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
232 
233 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
234 	if (unlikely(!comp_ctx))
235 		return ERR_PTR(-EINVAL);
236 
237 	comp_ctx->status = ENA_CMD_SUBMITTED;
238 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
239 	comp_ctx->user_cqe = comp;
240 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
241 
242 	reinit_completion(&comp_ctx->wait_event);
243 
244 	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
245 
246 	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
247 		queue_size_mask;
248 
249 	admin_queue->sq.tail++;
250 	admin_queue->stats.submitted_cmd++;
251 
252 	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
253 		admin_queue->sq.phase = !admin_queue->sq.phase;
254 
255 	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
256 
257 	return comp_ctx;
258 }
259 
260 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
261 {
262 	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
263 	struct ena_comp_ctx *comp_ctx;
264 	u16 i;
265 
266 	admin_queue->comp_ctx =
267 		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
268 	if (unlikely(!admin_queue->comp_ctx)) {
269 		pr_err("Memory allocation failed\n");
270 		return -ENOMEM;
271 	}
272 
273 	for (i = 0; i < admin_queue->q_depth; i++) {
274 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
275 		if (comp_ctx)
276 			init_completion(&comp_ctx->wait_event);
277 	}
278 
279 	return 0;
280 }
281 
282 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
283 						     struct ena_admin_aq_entry *cmd,
284 						     size_t cmd_size_in_bytes,
285 						     struct ena_admin_acq_entry *comp,
286 						     size_t comp_size_in_bytes)
287 {
288 	unsigned long flags = 0;
289 	struct ena_comp_ctx *comp_ctx;
290 
291 	spin_lock_irqsave(&admin_queue->q_lock, flags);
292 	if (unlikely(!admin_queue->running_state)) {
293 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
294 		return ERR_PTR(-ENODEV);
295 	}
296 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
297 					      cmd_size_in_bytes,
298 					      comp,
299 					      comp_size_in_bytes);
300 	if (IS_ERR(comp_ctx))
301 		admin_queue->running_state = false;
302 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
303 
304 	return comp_ctx;
305 }
306 
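/* Allocate the resources of an IO submission queue. For host-memory
 * placement the descriptor ring is allocated as coherent DMA memory,
 * preferring the requested NUMA node and falling back to the default node.
 * For device-memory placement (LLQ) bounce buffers are allocated instead
 * and the LLQ configuration is copied into the queue.
 */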
307 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
308 			      struct ena_com_create_io_ctx *ctx,
309 			      struct ena_com_io_sq *io_sq)
310 {
311 	size_t size;
312 	int dev_node = 0;
313 
314 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
315 
316 	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
317 	io_sq->desc_entry_size =
318 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
319 		sizeof(struct ena_eth_io_tx_desc) :
320 		sizeof(struct ena_eth_io_rx_desc);
321 
322 	size = io_sq->desc_entry_size * io_sq->q_depth;
323 
324 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
325 		dev_node = dev_to_node(ena_dev->dmadev);
326 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
327 		io_sq->desc_addr.virt_addr =
328 			dma_alloc_coherent(ena_dev->dmadev, size,
329 					   &io_sq->desc_addr.phys_addr,
330 					   GFP_KERNEL);
331 		set_dev_node(ena_dev->dmadev, dev_node);
332 		if (!io_sq->desc_addr.virt_addr) {
333 			io_sq->desc_addr.virt_addr =
334 				dma_alloc_coherent(ena_dev->dmadev, size,
335 						   &io_sq->desc_addr.phys_addr,
336 						   GFP_KERNEL);
337 		}
338 
339 		if (!io_sq->desc_addr.virt_addr) {
340 			pr_err("Memory allocation failed\n");
341 			return -ENOMEM;
342 		}
343 	}
344 
345 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
346 		/* Allocate bounce buffers */
347 		io_sq->bounce_buf_ctrl.buffer_size =
348 			ena_dev->llq_info.desc_list_entry_size;
349 		io_sq->bounce_buf_ctrl.buffers_num =
350 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
351 		io_sq->bounce_buf_ctrl.next_to_use = 0;
352 
353 		size = io_sq->bounce_buf_ctrl.buffer_size *
354 			io_sq->bounce_buf_ctrl.buffers_num;
355 
356 		dev_node = dev_to_node(ena_dev->dmadev);
357 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
358 		io_sq->bounce_buf_ctrl.base_buffer =
359 			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
360 		set_dev_node(ena_dev->dmadev, dev_node);
361 		if (!io_sq->bounce_buf_ctrl.base_buffer)
362 			io_sq->bounce_buf_ctrl.base_buffer =
363 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
364 
365 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
366 			pr_err("Bounce buffer memory allocation failed\n");
367 			return -ENOMEM;
368 		}
369 
370 		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
371 		       sizeof(io_sq->llq_info));
372 
373 		/* Initialize the first bounce buffer */
374 		io_sq->llq_buf_ctrl.curr_bounce_buf =
375 			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
376 		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
377 		       0x0, io_sq->llq_info.desc_list_entry_size);
378 		io_sq->llq_buf_ctrl.descs_left_in_line =
379 			io_sq->llq_info.descs_num_before_header;
380 		io_sq->disable_meta_caching =
381 			io_sq->llq_info.disable_meta_caching;
382 
383 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
384 			io_sq->entries_in_tx_burst_left =
385 				io_sq->llq_info.max_entries_in_tx_burst;
386 	}
387 
388 	io_sq->tail = 0;
389 	io_sq->next_to_comp = 0;
390 	io_sq->phase = 1;
391 
392 	return 0;
393 }
394 
395 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
396 			      struct ena_com_create_io_ctx *ctx,
397 			      struct ena_com_io_cq *io_cq)
398 {
399 	size_t size;
400 	int prev_node = 0;
401 
402 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
403 
404 	/* Use the basic completion descriptor for Rx */
405 	io_cq->cdesc_entry_size_in_bytes =
406 		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
407 		sizeof(struct ena_eth_io_tx_cdesc) :
408 		sizeof(struct ena_eth_io_rx_cdesc_base);
409 
410 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
411 
412 	prev_node = dev_to_node(ena_dev->dmadev);
413 	set_dev_node(ena_dev->dmadev, ctx->numa_node);
414 	io_cq->cdesc_addr.virt_addr =
415 		dma_alloc_coherent(ena_dev->dmadev, size,
416 				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
417 	set_dev_node(ena_dev->dmadev, prev_node);
418 	if (!io_cq->cdesc_addr.virt_addr) {
419 		io_cq->cdesc_addr.virt_addr =
420 			dma_alloc_coherent(ena_dev->dmadev, size,
421 					   &io_cq->cdesc_addr.phys_addr,
422 					   GFP_KERNEL);
423 	}
424 
425 	if (!io_cq->cdesc_addr.virt_addr) {
426 		pr_err("Memory allocation failed\n");
427 		return -ENOMEM;
428 	}
429 
430 	io_cq->phase = 1;
431 	io_cq->head = 0;
432 
433 	return 0;
434 }
435 
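/* Match a single admin completion entry with its completion context,
 * record the device status, copy the entry to the caller's buffer (if one
 * was provided) and, in interrupt mode, wake up the waiting thread.
 */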
436 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
437 						   struct ena_admin_acq_entry *cqe)
438 {
439 	struct ena_comp_ctx *comp_ctx;
440 	u16 cmd_id;
441 
442 	cmd_id = cqe->acq_common_descriptor.command &
443 		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
444 
445 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
446 	if (unlikely(!comp_ctx)) {
447 		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
448 		admin_queue->running_state = false;
449 		return;
450 	}
451 
452 	comp_ctx->status = ENA_CMD_COMPLETED;
453 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
454 
455 	if (comp_ctx->user_cqe)
456 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
457 
458 	if (!admin_queue->polling)
459 		complete(&comp_ctx->wait_event);
460 }
461 
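/* Process all pending entries in the admin completion queue. New entries
 * are detected by comparing the descriptor phase bit with the expected
 * phase, which is flipped every time the queue wraps around.
 */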
462 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
463 {
464 	struct ena_admin_acq_entry *cqe = NULL;
465 	u16 comp_num = 0;
466 	u16 head_masked;
467 	u8 phase;
468 
469 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
470 	phase = admin_queue->cq.phase;
471 
472 	cqe = &admin_queue->cq.entries[head_masked];
473 
474 	/* Go over all the completions */
475 	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
476 		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
477 		/* Do not read the rest of the completion entry before the
478 		 * phase bit has been validated
479 		 */
480 		dma_rmb();
481 		ena_com_handle_single_admin_completion(admin_queue, cqe);
482 
483 		head_masked++;
484 		comp_num++;
485 		if (unlikely(head_masked == admin_queue->q_depth)) {
486 			head_masked = 0;
487 			phase = !phase;
488 		}
489 
490 		cqe = &admin_queue->cq.entries[head_masked];
491 	}
492 
493 	admin_queue->cq.head += comp_num;
494 	admin_queue->cq.phase = phase;
495 	admin_queue->sq.head += comp_num;
496 	admin_queue->stats.completed_cmd += comp_num;
497 }
498 
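/* Translate an admin completion status reported by the device into a
 * standard errno value (0 on ENA_ADMIN_SUCCESS).
 */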
499 static int ena_com_comp_status_to_errno(u8 comp_status)
500 {
501 	if (unlikely(comp_status != 0))
502 		pr_err("Admin command failed[%u]\n", comp_status);
503 
504 	switch (comp_status) {
505 	case ENA_ADMIN_SUCCESS:
506 		return 0;
507 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
508 		return -ENOMEM;
509 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
510 		return -EOPNOTSUPP;
511 	case ENA_ADMIN_BAD_OPCODE:
512 	case ENA_ADMIN_MALFORMED_REQUEST:
513 	case ENA_ADMIN_ILLEGAL_PARAMETER:
514 	case ENA_ADMIN_UNKNOWN_ERROR:
515 		return -EINVAL;
516 	case ENA_ADMIN_RESOURCE_BUSY:
517 		return -EAGAIN;
518 	}
519 
520 	return -EINVAL;
521 }
522 
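/* Sleep with exponential backoff between polling attempts. The base delay
 * is clamped to at least ENA_MIN_ADMIN_POLL_US, multiplied by 2^exp and
 * capped at ENA_MAX_ADMIN_POLL_US. For example, with a 100 us base delay
 * this yields roughly 100, 200, 400, ..., 5000 us between attempts.
 */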
523 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
524 {
525 	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
526 	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
527 	usleep_range(delay_us, 2 * delay_us);
528 }
529 
530 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
531 						     struct ena_com_admin_queue *admin_queue)
532 {
533 	unsigned long flags = 0;
534 	unsigned long timeout;
535 	int ret;
536 	u32 exp = 0;
537 
538 	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
539 
540 	while (1) {
541 		spin_lock_irqsave(&admin_queue->q_lock, flags);
542 		ena_com_handle_admin_completion(admin_queue);
543 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
544 
545 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
546 			break;
547 
548 		if (time_is_before_jiffies(timeout)) {
549 			pr_err("Wait for completion (polling) timeout\n");
550 			/* ENA didn't have any completion */
551 			spin_lock_irqsave(&admin_queue->q_lock, flags);
552 			admin_queue->stats.no_completion++;
553 			admin_queue->running_state = false;
554 			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
555 
556 			ret = -ETIME;
557 			goto err;
558 		}
559 
560 		ena_delay_exponential_backoff_us(exp++,
561 						 admin_queue->ena_dev->ena_min_poll_delay_us);
562 	}
563 
564 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
565 		pr_err("Command was aborted\n");
566 		spin_lock_irqsave(&admin_queue->q_lock, flags);
567 		admin_queue->stats.aborted_cmd++;
568 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
569 		ret = -ENODEV;
570 		goto err;
571 	}
572 
573 	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
574 	     comp_ctx->status);
575 
576 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
577 err:
578 	comp_ctxt_release(admin_queue, comp_ctx);
579 	return ret;
580 }
581 
582 /*
583  * Set the LLQ configurations of the firmware
584  *
585  * The driver provides only the enabled feature values to the device,
586  * which, in turn, checks whether they are supported.
587  */
588 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
589 {
590 	struct ena_com_admin_queue *admin_queue;
591 	struct ena_admin_set_feat_cmd cmd;
592 	struct ena_admin_set_feat_resp resp;
593 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
594 	int ret;
595 
596 	memset(&cmd, 0x0, sizeof(cmd));
597 	admin_queue = &ena_dev->admin_queue;
598 
599 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
600 	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
601 
602 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
603 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
604 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
605 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
606 
607 	cmd.u.llq.accel_mode.u.set.enabled_flags =
608 		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
609 		BIT(ENA_ADMIN_LIMIT_TX_BURST);
610 
611 	ret = ena_com_execute_admin_command(admin_queue,
612 					    (struct ena_admin_aq_entry *)&cmd,
613 					    sizeof(cmd),
614 					    (struct ena_admin_acq_entry *)&resp,
615 					    sizeof(resp));
616 
617 	if (unlikely(ret))
618 		pr_err("Failed to set LLQ configurations: %d\n", ret);
619 
620 	return ret;
621 }
622 
623 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
624 				   struct ena_admin_feature_llq_desc *llq_features,
625 				   struct ena_llq_configurations *llq_default_cfg)
626 {
627 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
628 	struct ena_admin_accel_mode_get llq_accel_mode_get;
629 	u16 supported_feat;
630 	int rc;
631 
632 	memset(llq_info, 0, sizeof(*llq_info));
633 
634 	supported_feat = llq_features->header_location_ctrl_supported;
635 
636 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
637 		llq_info->header_location_ctrl =
638 			llq_default_cfg->llq_header_location;
639 	} else {
640 		pr_err("Invalid header location control, supported: 0x%x\n",
641 		       supported_feat);
642 		return -EINVAL;
643 	}
644 
645 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
646 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
647 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
648 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
649 		} else	{
650 			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
651 				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
652 			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
653 				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
654 			} else {
655 				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
656 				       supported_feat);
657 				return -EINVAL;
658 			}
659 
660 			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
661 			       llq_default_cfg->llq_stride_ctrl, supported_feat,
662 			       llq_info->desc_stride_ctrl);
663 		}
664 	} else {
665 		llq_info->desc_stride_ctrl = 0;
666 	}
667 
668 	supported_feat = llq_features->entry_size_ctrl_supported;
669 	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
670 		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
671 		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
672 	} else {
673 		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
674 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
675 			llq_info->desc_list_entry_size = 128;
676 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
677 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
678 			llq_info->desc_list_entry_size = 192;
679 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
680 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
681 			llq_info->desc_list_entry_size = 256;
682 		} else {
683 			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
684 			       supported_feat);
685 			return -EINVAL;
686 		}
687 
688 		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
689 		       llq_default_cfg->llq_ring_entry_size, supported_feat,
690 		       llq_info->desc_list_entry_size);
691 	}
692 	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
693 		/* The desc list entry size should be a whole multiple of 8
694 		 * This requirement comes from __iowrite64_copy()
695 		 */
696 		pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
697 		return -EINVAL;
698 	}
699 
700 	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
701 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
702 			sizeof(struct ena_eth_io_tx_desc);
703 	else
704 		llq_info->descs_per_entry = 1;
705 
706 	supported_feat = llq_features->desc_num_before_header_supported;
707 	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
708 		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
709 	} else {
710 		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
711 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
712 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
713 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
714 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
715 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
716 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
717 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
718 		} else {
719 			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
720 			       supported_feat);
721 			return -EINVAL;
722 		}
723 
724 		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
725 		       llq_default_cfg->llq_num_decs_before_header,
726 		       supported_feat, llq_info->descs_num_before_header);
727 	}
728 	/* Check whether the accelerated queue feature is supported */
729 	llq_accel_mode_get = llq_features->accel_mode.u.get;
730 
731 	llq_info->disable_meta_caching =
732 		!!(llq_accel_mode_get.supported_flags &
733 		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
734 
735 	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
736 		llq_info->max_entries_in_tx_burst =
737 			llq_accel_mode_get.max_tx_burst_size /
738 			llq_default_cfg->llq_ring_entry_size_value;
739 
740 	rc = ena_com_set_llq(ena_dev);
741 	if (rc)
742 		pr_err("Cannot set LLQ configuration: %d\n", rc);
743 
744 	return rc;
745 }
746 
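/* Wait (interrupt driven) for an admin command to complete. If the wait
 * times out, the completion queue is processed directly to distinguish
 * between a missed MSI-X interrupt and a command the device never
 * completed; with auto-polling enabled the queue falls back to polling
 * mode in the former case. The completion context is released before
 * returning.
 */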
747 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
748 							struct ena_com_admin_queue *admin_queue)
749 {
750 	unsigned long flags = 0;
751 	int ret;
752 
753 	wait_for_completion_timeout(&comp_ctx->wait_event,
754 				    usecs_to_jiffies(
755 					    admin_queue->completion_timeout));
756 
757 	/* In case the command wasn't completed, find out the root cause.
758 	 * There might be 2 kinds of errors:
759 	 * 1) No completion (timeout reached)
760 	 * 2) There is a completion, but the driver didn't receive the MSI-X interrupt.
761 	 */
762 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
763 		spin_lock_irqsave(&admin_queue->q_lock, flags);
764 		ena_com_handle_admin_completion(admin_queue);
765 		admin_queue->stats.no_completion++;
766 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
767 
768 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
769 			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
770 			       comp_ctx->cmd_opcode,
771 			       admin_queue->auto_polling ? "ON" : "OFF");
772 			/* Check if fallback to polling is enabled */
773 			if (admin_queue->auto_polling)
774 				admin_queue->polling = true;
775 		} else {
776 			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
777 			       comp_ctx->cmd_opcode, comp_ctx->status);
778 		}
779 		/* Check if the driver shifted to polling mode.
780 		 * This happens when there is a completion without an interrupt
781 		 * and autopolling mode is enabled. Continue normal execution in such a case.
782 		 */
783 		if (!admin_queue->polling) {
784 			admin_queue->running_state = false;
785 			ret = -ETIME;
786 			goto err;
787 		}
788 	}
789 
790 	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
791 err:
792 	comp_ctxt_release(admin_queue, comp_ctx);
793 	return ret;
794 }
795 
796 /* This method reads a hardware device register by posting a write to the
797  * device's MMIO_REG_READ register and waiting for the response.
798  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
799  */
800 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
801 {
802 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
803 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
804 		mmio_read->read_resp;
805 	u32 mmio_read_reg, ret, i;
806 	unsigned long flags = 0;
807 	u32 timeout = mmio_read->reg_read_to;
808 
809 	might_sleep();
810 
811 	if (timeout == 0)
812 		timeout = ENA_REG_READ_TIMEOUT;
813 
814 	/* If readless is disabled, perform regular read */
815 	if (!mmio_read->readless_supported)
816 		return readl(ena_dev->reg_bar + offset);
817 
818 	spin_lock_irqsave(&mmio_read->lock, flags);
819 	mmio_read->seq_num++;
820 
821 	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
822 	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
823 			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
824 	mmio_read_reg |= mmio_read->seq_num &
825 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
826 
827 	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
828 
829 	for (i = 0; i < timeout; i++) {
830 		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
831 			break;
832 
833 		udelay(1);
834 	}
835 
836 	if (unlikely(i == timeout)) {
837 		pr_err("Reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
838 		       mmio_read->seq_num, offset, read_resp->req_id,
839 		       read_resp->reg_off);
840 		ret = ENA_MMIO_READ_TIMEOUT;
841 		goto err;
842 	}
843 
844 	if (read_resp->reg_off != offset) {
845 		pr_err("Read failure: wrong offset provided\n");
846 		ret = ENA_MMIO_READ_TIMEOUT;
847 	} else {
848 		ret = read_resp->reg_val;
849 	}
850 err:
851 	spin_unlock_irqrestore(&mmio_read->lock, flags);
852 
853 	return ret;
854 }
855 
856 /* There are two ways to wait for a completion.
857  * Polling mode - poll until the completion is available.
858  * Async mode - wait on the wait queue until the completion is ready
859  * (or the timeout expires).
860  * In async mode it is expected that the IRQ handler called
861  * ena_com_handle_admin_completion to mark the completions.
862  */
863 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
864 					     struct ena_com_admin_queue *admin_queue)
865 {
866 	if (admin_queue->polling)
867 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
868 								 admin_queue);
869 
870 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
871 							    admin_queue);
872 }
873 
874 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
875 				 struct ena_com_io_sq *io_sq)
876 {
877 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
878 	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
879 	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
880 	u8 direction;
881 	int ret;
882 
883 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
884 
885 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
886 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
887 	else
888 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
889 
890 	destroy_cmd.sq.sq_identity |= (direction <<
891 		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
892 		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
893 
894 	destroy_cmd.sq.sq_idx = io_sq->idx;
895 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
896 
897 	ret = ena_com_execute_admin_command(admin_queue,
898 					    (struct ena_admin_aq_entry *)&destroy_cmd,
899 					    sizeof(destroy_cmd),
900 					    (struct ena_admin_acq_entry *)&destroy_resp,
901 					    sizeof(destroy_resp));
902 
903 	if (unlikely(ret && (ret != -ENODEV)))
904 		pr_err("Failed to destroy IO SQ. error: %d\n", ret);
905 
906 	return ret;
907 }
908 
909 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
910 				  struct ena_com_io_sq *io_sq,
911 				  struct ena_com_io_cq *io_cq)
912 {
913 	size_t size;
914 
915 	if (io_cq->cdesc_addr.virt_addr) {
916 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
917 
918 		dma_free_coherent(ena_dev->dmadev, size,
919 				  io_cq->cdesc_addr.virt_addr,
920 				  io_cq->cdesc_addr.phys_addr);
921 
922 		io_cq->cdesc_addr.virt_addr = NULL;
923 	}
924 
925 	if (io_sq->desc_addr.virt_addr) {
926 		size = io_sq->desc_entry_size * io_sq->q_depth;
927 
928 		dma_free_coherent(ena_dev->dmadev, size,
929 				  io_sq->desc_addr.virt_addr,
930 				  io_sq->desc_addr.phys_addr);
931 
932 		io_sq->desc_addr.virt_addr = NULL;
933 	}
934 
935 	if (io_sq->bounce_buf_ctrl.base_buffer) {
936 		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
937 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
938 	}
939 }
940 
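/* Poll the device status register until the reset-in-progress bit matches
 * @exp_state or the timeout (given in units of 100 ms) expires, backing
 * off exponentially between reads.
 */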
941 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
942 				u16 exp_state)
943 {
944 	u32 val, exp = 0;
945 	unsigned long timeout_stamp;
946 
947 	/* The timeout is given in units of 100 ms; convert it to jiffies. */
948 	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
949 
950 	while (1) {
951 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
952 
953 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
954 			pr_err("Reg read timeout occurred\n");
955 			return -ETIME;
956 		}
957 
958 		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
959 			exp_state)
960 			return 0;
961 
962 		if (time_is_before_jiffies(timeout_stamp))
963 			return -ETIME;
964 
965 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
966 	}
967 }
968 
969 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
970 					       enum ena_admin_aq_feature_id feature_id)
971 {
972 	u32 feature_mask = 1 << feature_id;
973 
974 	/* Device attributes are always supported */
975 	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
976 	    !(ena_dev->supported_features & feature_mask))
977 		return false;
978 
979 	return true;
980 }
981 
982 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
983 				  struct ena_admin_get_feat_resp *get_resp,
984 				  enum ena_admin_aq_feature_id feature_id,
985 				  dma_addr_t control_buf_dma_addr,
986 				  u32 control_buff_size,
987 				  u8 feature_ver)
988 {
989 	struct ena_com_admin_queue *admin_queue;
990 	struct ena_admin_get_feat_cmd get_cmd;
991 	int ret;
992 
993 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
994 		pr_debug("Feature %d isn't supported\n", feature_id);
995 		return -EOPNOTSUPP;
996 	}
997 
998 	memset(&get_cmd, 0x0, sizeof(get_cmd));
999 	admin_queue = &ena_dev->admin_queue;
1000 
1001 	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1002 
1003 	if (control_buff_size)
1004 		get_cmd.aq_common_descriptor.flags =
1005 			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1006 	else
1007 		get_cmd.aq_common_descriptor.flags = 0;
1008 
1009 	ret = ena_com_mem_addr_set(ena_dev,
1010 				   &get_cmd.control_buffer.address,
1011 				   control_buf_dma_addr);
1012 	if (unlikely(ret)) {
1013 		pr_err("Memory address set failed\n");
1014 		return ret;
1015 	}
1016 
1017 	get_cmd.control_buffer.length = control_buff_size;
1018 	get_cmd.feat_common.feature_version = feature_ver;
1019 	get_cmd.feat_common.feature_id = feature_id;
1020 
1021 	ret = ena_com_execute_admin_command(admin_queue,
1022 					    (struct ena_admin_aq_entry *)
1023 					    &get_cmd,
1024 					    sizeof(get_cmd),
1025 					    (struct ena_admin_acq_entry *)
1026 					    get_resp,
1027 					    sizeof(*get_resp));
1028 
1029 	if (unlikely(ret))
1030 		pr_err("Failed to submit get_feature command %d error: %d\n",
1031 		       feature_id, ret);
1032 
1033 	return ret;
1034 }
1035 
1036 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1037 			       struct ena_admin_get_feat_resp *get_resp,
1038 			       enum ena_admin_aq_feature_id feature_id,
1039 			       u8 feature_ver)
1040 {
1041 	return ena_com_get_feature_ex(ena_dev,
1042 				      get_resp,
1043 				      feature_id,
1044 				      0,
1045 				      0,
1046 				      feature_ver);
1047 }
1048 
1049 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1050 {
1051 	return ena_dev->rss.hash_func;
1052 }
1053 
1054 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1055 {
1056 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1057 		(ena_dev->rss).hash_key;
1058 
1059 	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1060 	/* The key buffer is stored in the device in an array of
1061 	 * uint32 elements.
1062 	 */
1063 	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1064 }
1065 
1066 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1067 {
1068 	struct ena_rss *rss = &ena_dev->rss;
1069 
1070 	if (!ena_com_check_supported_feature_id(ena_dev,
1071 						ENA_ADMIN_RSS_HASH_FUNCTION))
1072 		return -EOPNOTSUPP;
1073 
1074 	rss->hash_key =
1075 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1076 				   &rss->hash_key_dma_addr, GFP_KERNEL);
1077 
1078 	if (unlikely(!rss->hash_key))
1079 		return -ENOMEM;
1080 
1081 	return 0;
1082 }
1083 
1084 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1085 {
1086 	struct ena_rss *rss = &ena_dev->rss;
1087 
1088 	if (rss->hash_key)
1089 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1090 				  rss->hash_key, rss->hash_key_dma_addr);
1091 	rss->hash_key = NULL;
1092 }
1093 
1094 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1095 {
1096 	struct ena_rss *rss = &ena_dev->rss;
1097 
1098 	rss->hash_ctrl =
1099 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1100 				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1101 
1102 	if (unlikely(!rss->hash_ctrl))
1103 		return -ENOMEM;
1104 
1105 	return 0;
1106 }
1107 
1108 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1109 {
1110 	struct ena_rss *rss = &ena_dev->rss;
1111 
1112 	if (rss->hash_ctrl)
1113 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1114 				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1115 	rss->hash_ctrl = NULL;
1116 }
1117 
1118 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1119 					   u16 log_size)
1120 {
1121 	struct ena_rss *rss = &ena_dev->rss;
1122 	struct ena_admin_get_feat_resp get_resp;
1123 	size_t tbl_size;
1124 	int ret;
1125 
1126 	ret = ena_com_get_feature(ena_dev, &get_resp,
1127 				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1128 	if (unlikely(ret))
1129 		return ret;
1130 
1131 	if ((get_resp.u.ind_table.min_size > log_size) ||
1132 	    (get_resp.u.ind_table.max_size < log_size)) {
1133 		pr_err("Indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
1134 		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
1135 		       1 << get_resp.u.ind_table.max_size);
1136 		return -EINVAL;
1137 	}
1138 
1139 	tbl_size = (1ULL << log_size) *
1140 		sizeof(struct ena_admin_rss_ind_table_entry);
1141 
1142 	rss->rss_ind_tbl =
1143 		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1144 				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1145 	if (unlikely(!rss->rss_ind_tbl))
1146 		goto mem_err1;
1147 
1148 	tbl_size = (1ULL << log_size) * sizeof(u16);
1149 	rss->host_rss_ind_tbl =
1150 		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1151 	if (unlikely(!rss->host_rss_ind_tbl))
1152 		goto mem_err2;
1153 
1154 	rss->tbl_log_size = log_size;
1155 
1156 	return 0;
1157 
1158 mem_err2:
1159 	tbl_size = (1ULL << log_size) *
1160 		sizeof(struct ena_admin_rss_ind_table_entry);
1161 
1162 	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1163 			  rss->rss_ind_tbl_dma_addr);
1164 	rss->rss_ind_tbl = NULL;
1165 mem_err1:
1166 	rss->tbl_log_size = 0;
1167 	return -ENOMEM;
1168 }
1169 
1170 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1171 {
1172 	struct ena_rss *rss = &ena_dev->rss;
1173 	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1174 		sizeof(struct ena_admin_rss_ind_table_entry);
1175 
1176 	if (rss->rss_ind_tbl)
1177 		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1178 				  rss->rss_ind_tbl_dma_addr);
1179 	rss->rss_ind_tbl = NULL;
1180 
1181 	if (rss->host_rss_ind_tbl)
1182 		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1183 	rss->host_rss_ind_tbl = NULL;
1184 }
1185 
1186 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1187 				struct ena_com_io_sq *io_sq, u16 cq_idx)
1188 {
1189 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1190 	struct ena_admin_aq_create_sq_cmd create_cmd;
1191 	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1192 	u8 direction;
1193 	int ret;
1194 
1195 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1196 
1197 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1198 
1199 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1200 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1201 	else
1202 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1203 
1204 	create_cmd.sq_identity |= (direction <<
1205 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1206 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1207 
1208 	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1209 		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1210 
1211 	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1212 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1213 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1214 
1215 	create_cmd.sq_caps_3 |=
1216 		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1217 
1218 	create_cmd.cq_idx = cq_idx;
1219 	create_cmd.sq_depth = io_sq->q_depth;
1220 
1221 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1222 		ret = ena_com_mem_addr_set(ena_dev,
1223 					   &create_cmd.sq_ba,
1224 					   io_sq->desc_addr.phys_addr);
1225 		if (unlikely(ret)) {
1226 			pr_err("Memory address set failed\n");
1227 			return ret;
1228 		}
1229 	}
1230 
1231 	ret = ena_com_execute_admin_command(admin_queue,
1232 					    (struct ena_admin_aq_entry *)&create_cmd,
1233 					    sizeof(create_cmd),
1234 					    (struct ena_admin_acq_entry *)&cmd_completion,
1235 					    sizeof(cmd_completion));
1236 	if (unlikely(ret)) {
1237 		pr_err("Failed to create IO SQ. error: %d\n", ret);
1238 		return ret;
1239 	}
1240 
1241 	io_sq->idx = cmd_completion.sq_idx;
1242 
1243 	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1244 		(uintptr_t)cmd_completion.sq_doorbell_offset);
1245 
1246 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1247 		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1248 				+ cmd_completion.llq_headers_offset);
1249 
1250 		io_sq->desc_addr.pbuf_dev_addr =
1251 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1252 			cmd_completion.llq_descriptors_offset);
1253 	}
1254 
1255 	pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1256 
1257 	return ret;
1258 }
1259 
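/* Translate the host indirection table, which holds driver queue ids, into
 * the device format by replacing each qid with the device index of the
 * corresponding Rx IO SQ. Fails if an entry points to a non-Rx queue or an
 * out-of-range qid.
 */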
1260 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1261 {
1262 	struct ena_rss *rss = &ena_dev->rss;
1263 	struct ena_com_io_sq *io_sq;
1264 	u16 qid;
1265 	int i;
1266 
1267 	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1268 		qid = rss->host_rss_ind_tbl[i];
1269 		if (qid >= ENA_TOTAL_NUM_QUEUES)
1270 			return -EINVAL;
1271 
1272 		io_sq = &ena_dev->io_sq_queues[qid];
1273 
1274 		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1275 			return -EINVAL;
1276 
1277 		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1278 	}
1279 
1280 	return 0;
1281 }
1282 
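/* Rescale the stored Rx/Tx interrupt moderation intervals from the
 * previous delay resolution to the one reported by the device, falling
 * back to the default 1 usec resolution if the device reports zero.
 */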
1283 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1284 						 u16 intr_delay_resolution)
1285 {
1286 	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1287 
1288 	if (unlikely(!intr_delay_resolution)) {
1289 		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1290 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1291 	}
1292 
1293 	/* update Rx */
1294 	ena_dev->intr_moder_rx_interval =
1295 		ena_dev->intr_moder_rx_interval *
1296 		prev_intr_delay_resolution /
1297 		intr_delay_resolution;
1298 
1299 	/* update Tx */
1300 	ena_dev->intr_moder_tx_interval =
1301 		ena_dev->intr_moder_tx_interval *
1302 		prev_intr_delay_resolution /
1303 		intr_delay_resolution;
1304 
1305 	ena_dev->intr_delay_resolution = intr_delay_resolution;
1306 }
1307 
1308 /*****************************************************************************/
1309 /*******************************      API       ******************************/
1310 /*****************************************************************************/
1311 
1312 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1313 				  struct ena_admin_aq_entry *cmd,
1314 				  size_t cmd_size,
1315 				  struct ena_admin_acq_entry *comp,
1316 				  size_t comp_size)
1317 {
1318 	struct ena_comp_ctx *comp_ctx;
1319 	int ret;
1320 
1321 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1322 					    comp, comp_size);
1323 	if (IS_ERR(comp_ctx)) {
1324 		if (comp_ctx == ERR_PTR(-ENODEV))
1325 			pr_debug("Failed to submit command [%ld]\n",
1326 				 PTR_ERR(comp_ctx));
1327 		else
1328 			pr_err("Failed to submit command [%ld]\n",
1329 			       PTR_ERR(comp_ctx));
1330 
1331 		return PTR_ERR(comp_ctx);
1332 	}
1333 
1334 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1335 	if (unlikely(ret)) {
1336 		if (admin_queue->running_state)
1337 			pr_err("Failed to process command. ret = %d\n", ret);
1338 		else
1339 			pr_debug("Failed to process command. ret = %d\n", ret);
1340 	}
1341 	return ret;
1342 }
1343 
1344 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1345 			 struct ena_com_io_cq *io_cq)
1346 {
1347 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1348 	struct ena_admin_aq_create_cq_cmd create_cmd;
1349 	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1350 	int ret;
1351 
1352 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1353 
1354 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1355 
1356 	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1357 		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1358 	create_cmd.cq_caps_1 |=
1359 		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1360 
1361 	create_cmd.msix_vector = io_cq->msix_vector;
1362 	create_cmd.cq_depth = io_cq->q_depth;
1363 
1364 	ret = ena_com_mem_addr_set(ena_dev,
1365 				   &create_cmd.cq_ba,
1366 				   io_cq->cdesc_addr.phys_addr);
1367 	if (unlikely(ret)) {
1368 		pr_err("Memory address set failed\n");
1369 		return ret;
1370 	}
1371 
1372 	ret = ena_com_execute_admin_command(admin_queue,
1373 					    (struct ena_admin_aq_entry *)&create_cmd,
1374 					    sizeof(create_cmd),
1375 					    (struct ena_admin_acq_entry *)&cmd_completion,
1376 					    sizeof(cmd_completion));
1377 	if (unlikely(ret)) {
1378 		pr_err("Failed to create IO CQ. error: %d\n", ret);
1379 		return ret;
1380 	}
1381 
1382 	io_cq->idx = cmd_completion.cq_idx;
1383 
1384 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1385 		cmd_completion.cq_interrupt_unmask_register_offset);
1386 
1387 	if (cmd_completion.cq_head_db_register_offset)
1388 		io_cq->cq_head_db_reg =
1389 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1390 			cmd_completion.cq_head_db_register_offset);
1391 
1392 	if (cmd_completion.numa_node_register_offset)
1393 		io_cq->numa_node_cfg_reg =
1394 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1395 			cmd_completion.numa_node_register_offset);
1396 
1397 	pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1398 
1399 	return ret;
1400 }
1401 
1402 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1403 			    struct ena_com_io_sq **io_sq,
1404 			    struct ena_com_io_cq **io_cq)
1405 {
1406 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1407 		pr_err("Invalid queue number %d, the maximum is %d\n", qid,
1408 		       ENA_TOTAL_NUM_QUEUES);
1409 		return -EINVAL;
1410 	}
1411 
1412 	*io_sq = &ena_dev->io_sq_queues[qid];
1413 	*io_cq = &ena_dev->io_cq_queues[qid];
1414 
1415 	return 0;
1416 }
1417 
1418 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1419 {
1420 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1421 	struct ena_comp_ctx *comp_ctx;
1422 	u16 i;
1423 
1424 	if (!admin_queue->comp_ctx)
1425 		return;
1426 
1427 	for (i = 0; i < admin_queue->q_depth; i++) {
1428 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1429 		if (unlikely(!comp_ctx))
1430 			break;
1431 
1432 		comp_ctx->status = ENA_CMD_ABORTED;
1433 
1434 		complete(&comp_ctx->wait_event);
1435 	}
1436 }
1437 
1438 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1439 {
1440 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1441 	unsigned long flags = 0;
1442 	u32 exp = 0;
1443 
1444 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1445 	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1446 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1447 		ena_delay_exponential_backoff_us(exp++,
1448 						 ena_dev->ena_min_poll_delay_us);
1449 		spin_lock_irqsave(&admin_queue->q_lock, flags);
1450 	}
1451 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1452 }
1453 
1454 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1455 			  struct ena_com_io_cq *io_cq)
1456 {
1457 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1458 	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1459 	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1460 	int ret;
1461 
1462 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1463 
1464 	destroy_cmd.cq_idx = io_cq->idx;
1465 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1466 
1467 	ret = ena_com_execute_admin_command(admin_queue,
1468 					    (struct ena_admin_aq_entry *)&destroy_cmd,
1469 					    sizeof(destroy_cmd),
1470 					    (struct ena_admin_acq_entry *)&destroy_resp,
1471 					    sizeof(destroy_resp));
1472 
1473 	if (unlikely(ret && (ret != -ENODEV)))
1474 		pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1475 
1476 	return ret;
1477 }
1478 
1479 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1480 {
1481 	return ena_dev->admin_queue.running_state;
1482 }
1483 
1484 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1485 {
1486 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1487 	unsigned long flags = 0;
1488 
1489 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1490 	ena_dev->admin_queue.running_state = state;
1491 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1492 }
1493 
1494 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1495 {
1496 	u16 depth = ena_dev->aenq.q_depth;
1497 
1498 	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1499 
1500 	/* Init head_db to mark that all entries in the queue
1501 	 * are initially available
1502 	 */
1503 	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1504 }
1505 
1506 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1507 {
1508 	struct ena_com_admin_queue *admin_queue;
1509 	struct ena_admin_set_feat_cmd cmd;
1510 	struct ena_admin_set_feat_resp resp;
1511 	struct ena_admin_get_feat_resp get_resp;
1512 	int ret;
1513 
1514 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1515 	if (ret) {
1516 		pr_info("Can't get aenq configuration\n");
1517 		return ret;
1518 	}
1519 
1520 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1521 		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1522 			get_resp.u.aenq.supported_groups, groups_flag);
1523 		return -EOPNOTSUPP;
1524 	}
1525 
1526 	memset(&cmd, 0x0, sizeof(cmd));
1527 	admin_queue = &ena_dev->admin_queue;
1528 
1529 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1530 	cmd.aq_common_descriptor.flags = 0;
1531 	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1532 	cmd.u.aenq.enabled_groups = groups_flag;
1533 
1534 	ret = ena_com_execute_admin_command(admin_queue,
1535 					    (struct ena_admin_aq_entry *)&cmd,
1536 					    sizeof(cmd),
1537 					    (struct ena_admin_acq_entry *)&resp,
1538 					    sizeof(resp));
1539 
1540 	if (unlikely(ret))
1541 		pr_err("Failed to config AENQ ret: %d\n", ret);
1542 
1543 	return ret;
1544 }
1545 
1546 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1547 {
1548 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1549 	int width;
1550 
1551 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1552 		pr_err("Reg read timeout occurred\n");
1553 		return -ETIME;
1554 	}
1555 
1556 	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1557 		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1558 
1559 	pr_debug("ENA dma width: %d\n", width);
1560 
1561 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1562 		pr_err("DMA width illegal value: %d\n", width);
1563 		return -EINVAL;
1564 	}
1565 
1566 	ena_dev->dma_addr_bits = width;
1567 
1568 	return width;
1569 }
1570 
1571 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1572 {
1573 	u32 ver;
1574 	u32 ctrl_ver;
1575 	u32 ctrl_ver_masked;
1576 
1577 	/* Make sure the ENA version and the controller version are at least
1578 	 * the versions the driver expects
1579 	 */
1580 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1581 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1582 					  ENA_REGS_CONTROLLER_VERSION_OFF);
1583 
1584 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1585 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1586 		pr_err("Reg read timeout occurred\n");
1587 		return -ETIME;
1588 	}
1589 
1590 	pr_info("ENA device version: %d.%d\n",
1591 		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1592 			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1593 		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1594 
1595 	pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
1596 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1597 			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1598 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1599 			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1600 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1601 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1602 			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1603 
1604 	ctrl_ver_masked =
1605 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1606 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1607 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1608 
1609 	/* Validate the ctrl version without the implementation ID */
1610 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1611 		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1612 		return -1;
1613 	}
1614 
1615 	return 0;
1616 }
1617 
1618 static void
1619 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1620 				      struct ena_com_admin_queue *admin_queue)
1622 {
1623 	if (!admin_queue->comp_ctx)
1624 		return;
1625 
1626 	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1627 
1628 	admin_queue->comp_ctx = NULL;
1629 }
1630 
1631 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1632 {
1633 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1634 	struct ena_com_admin_cq *cq = &admin_queue->cq;
1635 	struct ena_com_admin_sq *sq = &admin_queue->sq;
1636 	struct ena_com_aenq *aenq = &ena_dev->aenq;
1637 	u16 size;
1638 
1639 	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1640 
1641 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1642 	if (sq->entries)
1643 		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1644 				  sq->dma_addr);
1645 	sq->entries = NULL;
1646 
1647 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1648 	if (cq->entries)
1649 		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1650 				  cq->dma_addr);
1651 	cq->entries = NULL;
1652 
1653 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1654 	if (ena_dev->aenq.entries)
1655 		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1656 				  aenq->dma_addr);
1657 	aenq->entries = NULL;
1658 }
1659 
1660 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1661 {
1662 	u32 mask_value = 0;
1663 
1664 	if (polling)
1665 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1666 
1667 	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1668 	ena_dev->admin_queue.polling = polling;
1669 }
1670 
1671 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1672 					 bool polling)
1673 {
1674 	ena_dev->admin_queue.auto_polling = polling;
1675 }
1676 
1677 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1678 {
1679 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1680 
1681 	spin_lock_init(&mmio_read->lock);
1682 	mmio_read->read_resp =
1683 		dma_alloc_coherent(ena_dev->dmadev,
1684 				   sizeof(*mmio_read->read_resp),
1685 				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1686 	if (unlikely(!mmio_read->read_resp))
1687 		goto err;
1688 
1689 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1690 
1691 	mmio_read->read_resp->req_id = 0x0;
1692 	mmio_read->seq_num = 0x0;
1693 	mmio_read->readless_supported = true;
1694 
1695 	return 0;
1696 
1697 err:
1698 
1699 	return -ENOMEM;
1700 }
1701 
1702 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1703 {
1704 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1705 
1706 	mmio_read->readless_supported = readless_supported;
1707 }
1708 
1709 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1710 {
1711 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1712 
1713 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1714 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1715 
1716 	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1717 			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1718 
1719 	mmio_read->read_resp = NULL;
1720 }
1721 
1722 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1723 {
1724 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1725 	u32 addr_low, addr_high;
1726 
1727 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1728 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1729 
1730 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1731 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1732 }
1733 
1734 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1735 		       struct ena_aenq_handlers *aenq_handlers)
1736 {
1737 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1738 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1739 	int ret;
1740 
1741 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1742 
1743 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1744 		pr_err("Reg read timeout occurred\n");
1745 		return -ETIME;
1746 	}
1747 
1748 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1749 		pr_err("Device isn't ready, abort com init\n");
1750 		return -ENODEV;
1751 	}
1752 
1753 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1754 
1755 	admin_queue->q_dmadev = ena_dev->dmadev;
1756 	admin_queue->polling = false;
1757 	admin_queue->curr_cmd_id = 0;
1758 
1759 	atomic_set(&admin_queue->outstanding_cmds, 0);
1760 
1761 	spin_lock_init(&admin_queue->q_lock);
1762 
1763 	ret = ena_com_init_comp_ctxt(admin_queue);
1764 	if (ret)
1765 		goto error;
1766 
1767 	ret = ena_com_admin_init_sq(admin_queue);
1768 	if (ret)
1769 		goto error;
1770 
1771 	ret = ena_com_admin_init_cq(admin_queue);
1772 	if (ret)
1773 		goto error;
1774 
1775 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1776 		ENA_REGS_AQ_DB_OFF);
1777 
1778 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1779 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1780 
1781 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1782 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1783 
1784 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1785 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1786 
1787 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1788 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1789 
1790 	aq_caps = 0;
1791 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1792 	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1793 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1794 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1795 
1796 	acq_caps = 0;
1797 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1798 	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1799 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1800 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1801 
1802 	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1803 	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1804 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1805 	if (ret)
1806 		goto error;
1807 
1808 	admin_queue->ena_dev = ena_dev;
1809 	admin_queue->running_state = true;
1810 
1811 	return 0;
1812 error:
1813 	ena_com_admin_destroy(ena_dev);
1814 
1815 	return ret;
1816 }
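
/* Usage sketch (illustrative only, not part of the driver; "aenq_handlers" is
 * assumed to be a caller-defined struct ena_aenq_handlers): a typical
 * probe-time ordering sets up the MMIO read mechanism first, then the admin
 * queue, and only then selects the completion mode:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
 *	if (rc)
 *		goto err_mmio_read;
 *
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 *
 * On teardown the caller undoes the steps in reverse order with
 * ena_com_admin_destroy() and ena_com_mmio_reg_read_request_destroy().
 */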
1817 
1818 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1819 			    struct ena_com_create_io_ctx *ctx)
1820 {
1821 	struct ena_com_io_sq *io_sq;
1822 	struct ena_com_io_cq *io_cq;
1823 	int ret;
1824 
1825 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1826 		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1827 		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
1828 		return -EINVAL;
1829 	}
1830 
1831 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1832 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1833 
1834 	memset(io_sq, 0x0, sizeof(*io_sq));
1835 	memset(io_cq, 0x0, sizeof(*io_cq));
1836 
1837 	/* Init CQ */
1838 	io_cq->q_depth = ctx->queue_size;
1839 	io_cq->direction = ctx->direction;
1840 	io_cq->qid = ctx->qid;
1841 
1842 	io_cq->msix_vector = ctx->msix_vector;
1843 
1844 	io_sq->q_depth = ctx->queue_size;
1845 	io_sq->direction = ctx->direction;
1846 	io_sq->qid = ctx->qid;
1847 
1848 	io_sq->mem_queue_type = ctx->mem_queue_type;
1849 
1850 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1851 		/* header length is limited to 8 bits */
1852 		io_sq->tx_max_header_size =
1853 			min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1854 
1855 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1856 	if (ret)
1857 		goto error;
1858 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1859 	if (ret)
1860 		goto error;
1861 
1862 	ret = ena_com_create_io_cq(ena_dev, io_cq);
1863 	if (ret)
1864 		goto error;
1865 
1866 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1867 	if (ret)
1868 		goto destroy_io_cq;
1869 
1870 	return 0;
1871 
1872 destroy_io_cq:
1873 	ena_com_destroy_io_cq(ena_dev, io_cq);
1874 error:
1875 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1876 	return ret;
1877 }
1878 
1879 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1880 {
1881 	struct ena_com_io_sq *io_sq;
1882 	struct ena_com_io_cq *io_cq;
1883 
1884 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1885 		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1886 		       ENA_TOTAL_NUM_QUEUES);
1887 		return;
1888 	}
1889 
1890 	io_sq = &ena_dev->io_sq_queues[qid];
1891 	io_cq = &ena_dev->io_cq_queues[qid];
1892 
1893 	ena_com_destroy_io_sq(ena_dev, io_sq);
1894 	ena_com_destroy_io_cq(ena_dev, io_cq);
1895 
1896 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1897 }
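
/* Usage sketch (illustrative only; "qid", "depth" and "vector" are
 * hypothetical caller values): creating an IO queue pair amounts to filling an
 * ena_com_create_io_ctx and handing it to ena_com_create_io_queue():
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid = qid,
 *		.queue_size = depth,
 *		.msix_vector = vector,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 *
 * The matching teardown is ena_com_destroy_io_queue(ena_dev, qid).
 */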
1898 
1899 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1900 			    struct ena_admin_get_feat_resp *resp)
1901 {
1902 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1903 }
1904 
1905 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1906 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
1907 {
1908 	struct ena_admin_get_feat_resp get_resp;
1909 	int rc;
1910 
1911 	rc = ena_com_get_feature(ena_dev, &get_resp,
1912 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1913 	if (rc)
1914 		return rc;
1915 
1916 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1917 	       sizeof(get_resp.u.dev_attr));
1918 
1919 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1920 
1921 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1922 		rc = ena_com_get_feature(ena_dev, &get_resp,
1923 					 ENA_ADMIN_MAX_QUEUES_EXT,
1924 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1925 		if (rc)
1926 			return rc;
1927 
1928 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1929 			return -EINVAL;
1930 
1931 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1932 		       sizeof(get_resp.u.max_queue_ext));
1933 		ena_dev->tx_max_header_size =
1934 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1935 	} else {
1936 		rc = ena_com_get_feature(ena_dev, &get_resp,
1937 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1938 		if (rc)
1939 			return rc;
1940 
1941 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1942 		       sizeof(get_resp.u.max_queue));
1943 		ena_dev->tx_max_header_size =
1944 			get_resp.u.max_queue.max_header_size;
1945 	}
1946 
1947 	rc = ena_com_get_feature(ena_dev, &get_resp,
1948 				 ENA_ADMIN_AENQ_CONFIG, 0);
1949 	if (rc)
1950 		return rc;
1951 
1952 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1953 	       sizeof(get_resp.u.aenq));
1954 
1955 	rc = ena_com_get_feature(ena_dev, &get_resp,
1956 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1957 	if (rc)
1958 		return rc;
1959 
1960 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1961 	       sizeof(get_resp.u.offload));
1962 
1963 	/* The driver hints command is not mandatory, so if the device
1964 	 * doesn't support it, simply zero out the driver hints.
1965 	 */
1966 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1967 
1968 	if (!rc)
1969 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1970 		       sizeof(get_resp.u.hw_hints));
1971 	else if (rc == -EOPNOTSUPP)
1972 		memset(&get_feat_ctx->hw_hints, 0x0,
1973 		       sizeof(get_feat_ctx->hw_hints));
1974 	else
1975 		return rc;
1976 
1977 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1978 	if (!rc)
1979 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1980 		       sizeof(get_resp.u.llq));
1981 	else if (rc == -EOPNOTSUPP)
1982 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1983 	else
1984 		return rc;
1985 
1986 	return 0;
1987 }
1988 
1989 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1990 {
1991 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
1992 }
1993 
1994 /* ena_com_get_specific_aenq_cb:
1995  * return the handler that is relevant to the specific event group
1996  */
1997 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
1998 						     u16 group)
1999 {
2000 	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2001 
2002 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2003 		return aenq_handlers->handlers[group];
2004 
2005 	return aenq_handlers->unimplemented_handler;
2006 }
2007 
2008 /* ena_com_aenq_intr_handler:
2009  * handles incoming AENQ events:
2010  * pops events from the queue and applies the matching handler
2011  */
2012 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2013 {
2014 	struct ena_admin_aenq_entry *aenq_e;
2015 	struct ena_admin_aenq_common_desc *aenq_common;
2016 	struct ena_com_aenq *aenq  = &ena_dev->aenq;
2017 	u64 timestamp;
2018 	ena_aenq_handler handler_cb;
2019 	u16 masked_head, processed = 0;
2020 	u8 phase;
2021 
2022 	masked_head = aenq->head & (aenq->q_depth - 1);
2023 	phase = aenq->phase;
2024 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2025 	aenq_common = &aenq_e->aenq_common_desc;
2026 
2027 	/* Go over all the events */
2028 	while ((READ_ONCE(aenq_common->flags) &
2029 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2030 		/* Make sure the phase bit (ownership) is as expected before
2031 		 * reading the rest of the descriptor.
2032 		 */
2033 		dma_rmb();
2034 
2035 		timestamp = (u64)aenq_common->timestamp_low |
2036 			((u64)aenq_common->timestamp_high << 32);
2037 
2038 		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2039 			 aenq_common->group, aenq_common->syndrome, timestamp);
2040 
2041 		/* Handle specific event */
2042 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2043 							  aenq_common->group);
2044 		handler_cb(data, aenq_e); /* call the actual event handler */
2045 
2046 		/* Get next event entry */
2047 		masked_head++;
2048 		processed++;
2049 
2050 		if (unlikely(masked_head == aenq->q_depth)) {
2051 			masked_head = 0;
2052 			phase = !phase;
2053 		}
2054 		aenq_e = &aenq->entries[masked_head];
2055 		aenq_common = &aenq_e->aenq_common_desc;
2056 	}
2057 
2058 	aenq->head += processed;
2059 	aenq->phase = phase;
2060 
2061 	/* Don't update aenq doorbell if there weren't any processed events */
2062 	if (!processed)
2063 		return;
2064 
2065 	/* write the aenq doorbell after all AENQ descriptors were read */
2066 	mb();
2067 	writel_relaxed((u32)aenq->head,
2068 		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2069 }
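
/* Usage sketch (illustrative only; the callback names are hypothetical and
 * ENA_ADMIN_LINK_CHANGE is assumed to be one of the AENQ group indices from
 * the admin definitions header): the caller fills an ena_aenq_handlers table,
 * passes it to ena_com_admin_init(), and later calls
 * ena_com_aenq_intr_handler() from its AENQ interrupt routine. Groups without
 * a registered callback fall back to unimplemented_handler:
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 *
 *	ena_com_aenq_intr_handler(ena_dev, adapter);
 */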
2070 
2071 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2072 		      enum ena_regs_reset_reason_types reset_reason)
2073 {
2074 	u32 stat, timeout, cap, reset_val;
2075 	int rc;
2076 
2077 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2078 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2079 
2080 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2081 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
2082 		pr_err("Reg read32 timeout occurred\n");
2083 		return -ETIME;
2084 	}
2085 
2086 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2087 		pr_err("Device isn't ready, can't reset device\n");
2088 		return -EINVAL;
2089 	}
2090 
2091 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2092 			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2093 	if (timeout == 0) {
2094 		pr_err("Invalid timeout value\n");
2095 		return -EINVAL;
2096 	}
2097 
2098 	/* start reset */
2099 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2100 	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2101 		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2102 	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2103 
2104 	/* Write again the MMIO read request address */
2105 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2106 
2107 	rc = wait_for_reset_state(ena_dev, timeout,
2108 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2109 	if (rc != 0) {
2110 		pr_err("Reset indication didn't turn on\n");
2111 		return rc;
2112 	}
2113 
2114 	/* reset done */
2115 	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2116 	rc = wait_for_reset_state(ena_dev, timeout, 0);
2117 	if (rc != 0) {
2118 		pr_err("Reset indication didn't turn off\n");
2119 		return rc;
2120 	}
2121 
2122 	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2123 		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2124 	if (timeout)
2125 		/* the resolution of timeout reg is 100ms */
2126 		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2127 	else
2128 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2129 
2130 	return 0;
2131 }
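
/* Usage sketch (illustrative only; ENA_REGS_RESET_NORMAL is assumed to be one
 * of the ena_regs_reset_reason_types values): a recovery path quiesces I/O,
 * resets the device with a reason code and then rebuilds the admin and IO
 * queues. Note that ena_com_dev_reset() re-writes the MMIO read response
 * address itself, so only the queues need to be re-created by the caller:
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (rc)
 *		return rc;
 */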
2132 
2133 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2134 			     struct ena_com_stats_ctx *ctx,
2135 			     enum ena_admin_get_stats_type type)
2136 {
2137 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2138 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2139 	struct ena_com_admin_queue *admin_queue;
2140 	int ret;
2141 
2142 	admin_queue = &ena_dev->admin_queue;
2143 
2144 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2145 	get_cmd->aq_common_descriptor.flags = 0;
2146 	get_cmd->type = type;
2147 
2148 	ret =  ena_com_execute_admin_command(admin_queue,
2149 					     (struct ena_admin_aq_entry *)get_cmd,
2150 					     sizeof(*get_cmd),
2151 					     (struct ena_admin_acq_entry *)get_resp,
2152 					     sizeof(*get_resp));
2153 
2154 	if (unlikely(ret))
2155 		pr_err("Failed to get stats. error: %d\n", ret);
2156 
2157 	return ret;
2158 }
2159 
2160 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2161 			  struct ena_admin_eni_stats *stats)
2162 {
2163 	struct ena_com_stats_ctx ctx;
2164 	int ret;
2165 
2166 	memset(&ctx, 0x0, sizeof(ctx));
2167 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2168 	if (likely(ret == 0))
2169 		memcpy(stats, &ctx.get_resp.u.eni_stats,
2170 		       sizeof(ctx.get_resp.u.eni_stats));
2171 
2172 	return ret;
2173 }
2174 
2175 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2176 				struct ena_admin_basic_stats *stats)
2177 {
2178 	struct ena_com_stats_ctx ctx;
2179 	int ret;
2180 
2181 	memset(&ctx, 0x0, sizeof(ctx));
2182 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2183 	if (likely(ret == 0))
2184 		memcpy(stats, &ctx.get_resp.u.basic_stats,
2185 		       sizeof(ctx.get_resp.u.basic_stats));
2186 
2187 	return ret;
2188 }
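
/* Usage sketch (illustrative only; the rx_drops_low/high field names are
 * assumed from the admin definitions header): both stats getters follow the
 * same pattern - the caller owns the output struct and consumes it only when
 * the call succeeds:
 *
 *	struct ena_admin_basic_stats stats;
 *
 *	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
 *	if (rc)
 *		return rc;
 *
 *	rx_drops = ((u64)stats.rx_drops_high << 32) | stats.rx_drops_low;
 */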
2189 
2190 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2191 {
2192 	struct ena_com_admin_queue *admin_queue;
2193 	struct ena_admin_set_feat_cmd cmd;
2194 	struct ena_admin_set_feat_resp resp;
2195 	int ret;
2196 
2197 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2198 		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2199 		return -EOPNOTSUPP;
2200 	}
2201 
2202 	memset(&cmd, 0x0, sizeof(cmd));
2203 	admin_queue = &ena_dev->admin_queue;
2204 
2205 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2206 	cmd.aq_common_descriptor.flags = 0;
2207 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2208 	cmd.u.mtu.mtu = mtu;
2209 
2210 	ret = ena_com_execute_admin_command(admin_queue,
2211 					    (struct ena_admin_aq_entry *)&cmd,
2212 					    sizeof(cmd),
2213 					    (struct ena_admin_acq_entry *)&resp,
2214 					    sizeof(resp));
2215 
2216 	if (unlikely(ret))
2217 		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2218 
2219 	return ret;
2220 }
2221 
2222 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2223 				 struct ena_admin_feature_offload_desc *offload)
2224 {
2225 	int ret;
2226 	struct ena_admin_get_feat_resp resp;
2227 
2228 	ret = ena_com_get_feature(ena_dev, &resp,
2229 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2230 	if (unlikely(ret)) {
2231 		pr_err("Failed to get offload capabilities %d\n", ret);
2232 		return ret;
2233 	}
2234 
2235 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2236 
2237 	return 0;
2238 }
2239 
2240 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2241 {
2242 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2243 	struct ena_rss *rss = &ena_dev->rss;
2244 	struct ena_admin_set_feat_cmd cmd;
2245 	struct ena_admin_set_feat_resp resp;
2246 	struct ena_admin_get_feat_resp get_resp;
2247 	int ret;
2248 
2249 	if (!ena_com_check_supported_feature_id(ena_dev,
2250 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
2251 		pr_debug("Feature %d isn't supported\n",
2252 			 ENA_ADMIN_RSS_HASH_FUNCTION);
2253 		return -EOPNOTSUPP;
2254 	}
2255 
2256 	/* Validate hash function is supported */
2257 	ret = ena_com_get_feature(ena_dev, &get_resp,
2258 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2259 	if (unlikely(ret))
2260 		return ret;
2261 
2262 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2263 		pr_err("Func hash %d isn't supported by device, abort\n",
2264 		       rss->hash_func);
2265 		return -EOPNOTSUPP;
2266 	}
2267 
2268 	memset(&cmd, 0x0, sizeof(cmd));
2269 
2270 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2271 	cmd.aq_common_descriptor.flags =
2272 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2273 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2274 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2275 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2276 
2277 	ret = ena_com_mem_addr_set(ena_dev,
2278 				   &cmd.control_buffer.address,
2279 				   rss->hash_key_dma_addr);
2280 	if (unlikely(ret)) {
2281 		pr_err("Memory address set failed\n");
2282 		return ret;
2283 	}
2284 
2285 	cmd.control_buffer.length = sizeof(*rss->hash_key);
2286 
2287 	ret = ena_com_execute_admin_command(admin_queue,
2288 					    (struct ena_admin_aq_entry *)&cmd,
2289 					    sizeof(cmd),
2290 					    (struct ena_admin_acq_entry *)&resp,
2291 					    sizeof(resp));
2292 	if (unlikely(ret)) {
2293 		pr_err("Failed to set hash function %d. error: %d\n",
2294 		       rss->hash_func, ret);
2295 		return -EINVAL;
2296 	}
2297 
2298 	return 0;
2299 }
2300 
2301 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2302 			       enum ena_admin_hash_functions func,
2303 			       const u8 *key, u16 key_len, u32 init_val)
2304 {
2305 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2306 	struct ena_admin_get_feat_resp get_resp;
2307 	enum ena_admin_hash_functions old_func;
2308 	struct ena_rss *rss = &ena_dev->rss;
2309 	int rc;
2310 
2311 	hash_key = rss->hash_key;
2312 
2313 	/* Make sure the key size is a multiple of DWORDs (32-bit words) */
2314 	if (unlikely(key_len & 0x3))
2315 		return -EINVAL;
2316 
2317 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2318 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2319 				    rss->hash_key_dma_addr,
2320 				    sizeof(*rss->hash_key), 0);
2321 	if (unlikely(rc))
2322 		return rc;
2323 
2324 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2325 		pr_err("Flow hash function %d isn't supported\n", func);
2326 		return -EOPNOTSUPP;
2327 	}
2328 
2329 	switch (func) {
2330 	case ENA_ADMIN_TOEPLITZ:
2331 		if (key) {
2332 			if (key_len != sizeof(hash_key->key)) {
2333 				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2334 				       key_len, sizeof(hash_key->key));
2335 				return -EINVAL;
2336 			}
2337 			memcpy(hash_key->key, key, key_len);
2338 			rss->hash_init_val = init_val;
2339 			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2340 		}
2341 		break;
2342 	case ENA_ADMIN_CRC32:
2343 		rss->hash_init_val = init_val;
2344 		break;
2345 	default:
2346 		pr_err("Invalid hash function (%d)\n", func);
2347 		return -EINVAL;
2348 	}
2349 
2350 	old_func = rss->hash_func;
2351 	rss->hash_func = func;
2352 	rc = ena_com_set_hash_function(ena_dev);
2353 
2354 	/* Restore the old function */
2355 	if (unlikely(rc))
2356 		rss->hash_func = old_func;
2357 
2358 	return rc;
2359 }
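
/* Usage sketch (illustrative only; "toeplitz_key" is a caller-supplied buffer
 * and ENA_HASH_KEY_SIZE is assumed to be the key size exported by the
 * companion header): selecting Toeplitz with an explicit key and a zero
 * initial value:
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, ENA_HASH_KEY_SIZE, 0);
 *	if (unlikely(rc))
 *		return rc;
 *
 * Passing a NULL key keeps the currently programmed key and only switches the
 * hash function; the key length must match the device key size and be a
 * multiple of 4 bytes.
 */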
2360 
2361 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2362 			      enum ena_admin_hash_functions *func)
2363 {
2364 	struct ena_rss *rss = &ena_dev->rss;
2365 	struct ena_admin_get_feat_resp get_resp;
2366 	int rc;
2367 
2368 	if (unlikely(!func))
2369 		return -EINVAL;
2370 
2371 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2372 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2373 				    rss->hash_key_dma_addr,
2374 				    sizeof(*rss->hash_key), 0);
2375 	if (unlikely(rc))
2376 		return rc;
2377 
2378 	/* ffs() returns 1 in case the lsb is set */
2379 	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2380 	if (rss->hash_func)
2381 		rss->hash_func--;
2382 
2383 	*func = rss->hash_func;
2384 
2385 	return 0;
2386 }
2387 
2388 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2389 {
2390 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2391 		ena_dev->rss.hash_key;
2392 
2393 	if (key)
2394 		memcpy(key, hash_key->key,
2395 		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2396 
2397 	return 0;
2398 }
2399 
2400 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2401 			  enum ena_admin_flow_hash_proto proto,
2402 			  u16 *fields)
2403 {
2404 	struct ena_rss *rss = &ena_dev->rss;
2405 	struct ena_admin_get_feat_resp get_resp;
2406 	int rc;
2407 
2408 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2409 				    ENA_ADMIN_RSS_HASH_INPUT,
2410 				    rss->hash_ctrl_dma_addr,
2411 				    sizeof(*rss->hash_ctrl), 0);
2412 	if (unlikely(rc))
2413 		return rc;
2414 
2415 	if (fields)
2416 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2417 
2418 	return 0;
2419 }
2420 
2421 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2422 {
2423 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2424 	struct ena_rss *rss = &ena_dev->rss;
2425 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2426 	struct ena_admin_set_feat_cmd cmd;
2427 	struct ena_admin_set_feat_resp resp;
2428 	int ret;
2429 
2430 	if (!ena_com_check_supported_feature_id(ena_dev,
2431 						ENA_ADMIN_RSS_HASH_INPUT)) {
2432 		pr_debug("Feature %d isn't supported\n",
2433 			 ENA_ADMIN_RSS_HASH_INPUT);
2434 		return -EOPNOTSUPP;
2435 	}
2436 
2437 	memset(&cmd, 0x0, sizeof(cmd));
2438 
2439 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2440 	cmd.aq_common_descriptor.flags =
2441 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2442 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2443 	cmd.u.flow_hash_input.enabled_input_sort =
2444 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2445 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2446 
2447 	ret = ena_com_mem_addr_set(ena_dev,
2448 				   &cmd.control_buffer.address,
2449 				   rss->hash_ctrl_dma_addr);
2450 	if (unlikely(ret)) {
2451 		pr_err("Memory address set failed\n");
2452 		return ret;
2453 	}
2454 	cmd.control_buffer.length = sizeof(*hash_ctrl);
2455 
2456 	ret = ena_com_execute_admin_command(admin_queue,
2457 					    (struct ena_admin_aq_entry *)&cmd,
2458 					    sizeof(cmd),
2459 					    (struct ena_admin_acq_entry *)&resp,
2460 					    sizeof(resp));
2461 	if (unlikely(ret))
2462 		pr_err("Failed to set hash input. error: %d\n", ret);
2463 
2464 	return ret;
2465 }
2466 
2467 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2468 {
2469 	struct ena_rss *rss = &ena_dev->rss;
2470 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2471 		rss->hash_ctrl;
2472 	u16 available_fields = 0;
2473 	int rc, i;
2474 
2475 	/* Get the supported hash input */
2476 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2477 	if (unlikely(rc))
2478 		return rc;
2479 
2480 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2481 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2482 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2483 
2484 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2485 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2486 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2487 
2488 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2489 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2490 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2491 
2492 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2493 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2494 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2495 
2496 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2497 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2498 
2499 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2500 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2501 
2502 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2503 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2504 
2505 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2506 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2507 
2508 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2509 		available_fields = hash_ctrl->selected_fields[i].fields &
2510 				hash_ctrl->supported_fields[i].fields;
2511 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
2512 			pr_err("Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2513 			       i, hash_ctrl->supported_fields[i].fields,
2514 			       hash_ctrl->selected_fields[i].fields);
2515 			return -EOPNOTSUPP;
2516 		}
2517 	}
2518 
2519 	rc = ena_com_set_hash_ctrl(ena_dev);
2520 
2521 	/* In case of failure, restore the old hash ctrl */
2522 	if (unlikely(rc))
2523 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2524 
2525 	return rc;
2526 }
2527 
2528 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2529 			   enum ena_admin_flow_hash_proto proto,
2530 			   u16 hash_fields)
2531 {
2532 	struct ena_rss *rss = &ena_dev->rss;
2533 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2534 	u16 supported_fields;
2535 	int rc;
2536 
2537 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2538 		pr_err("Invalid proto num (%u)\n", proto);
2539 		return -EINVAL;
2540 	}
2541 
2542 	/* Get the ctrl table */
2543 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2544 	if (unlikely(rc))
2545 		return rc;
2546 
2547 	/* Make sure all the fields are supported */
2548 	supported_fields = hash_ctrl->supported_fields[proto].fields;
2549 	if ((hash_fields & supported_fields) != hash_fields) {
2550 		pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
2551 		       proto, hash_fields, supported_fields);
2552 	}
2553 
2554 	hash_ctrl->selected_fields[proto].fields = hash_fields;
2555 
2556 	rc = ena_com_set_hash_ctrl(ena_dev);
2557 
2558 	/* In case of failure, restore the old hash ctrl */
2559 	if (unlikely(rc))
2560 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2561 
2562 	return rc;
2563 }
2564 
2565 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2566 				      u16 entry_idx, u16 entry_value)
2567 {
2568 	struct ena_rss *rss = &ena_dev->rss;
2569 
2570 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2571 		return -EINVAL;
2572 
2573 	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2574 		return -EINVAL;
2575 
2576 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2577 
2578 	return 0;
2579 }
2580 
2581 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2582 {
2583 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2584 	struct ena_rss *rss = &ena_dev->rss;
2585 	struct ena_admin_set_feat_cmd cmd;
2586 	struct ena_admin_set_feat_resp resp;
2587 	int ret;
2588 
2589 	if (!ena_com_check_supported_feature_id(
2590 		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2591 		pr_debug("Feature %d isn't supported\n",
2592 			 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2593 		return -EOPNOTSUPP;
2594 	}
2595 
2596 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2597 	if (ret) {
2598 		pr_err("Failed to convert host indirection table to device table\n");
2599 		return ret;
2600 	}
2601 
2602 	memset(&cmd, 0x0, sizeof(cmd));
2603 
2604 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2605 	cmd.aq_common_descriptor.flags =
2606 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2607 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2608 	cmd.u.ind_table.size = rss->tbl_log_size;
2609 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2610 
2611 	ret = ena_com_mem_addr_set(ena_dev,
2612 				   &cmd.control_buffer.address,
2613 				   rss->rss_ind_tbl_dma_addr);
2614 	if (unlikely(ret)) {
2615 		pr_err("Memory address set failed\n");
2616 		return ret;
2617 	}
2618 
2619 	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2620 		sizeof(struct ena_admin_rss_ind_table_entry);
2621 
2622 	ret = ena_com_execute_admin_command(admin_queue,
2623 					    (struct ena_admin_aq_entry *)&cmd,
2624 					    sizeof(cmd),
2625 					    (struct ena_admin_acq_entry *)&resp,
2626 					    sizeof(resp));
2627 
2628 	if (unlikely(ret))
2629 		pr_err("Failed to set indirect table. error: %d\n", ret);
2630 
2631 	return ret;
2632 }
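
/* Usage sketch (illustrative only; "tbl_log_size" and "num_queues" are caller
 * state): the host indirection table is filled entry by entry and then pushed
 * to the device in a single admin command. Entry values are queue ids and are
 * rejected when they exceed ENA_TOTAL_NUM_QUEUES:
 *
 *	for (i = 0; i < (1 << tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */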
2633 
2634 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2635 {
2636 	struct ena_rss *rss = &ena_dev->rss;
2637 	struct ena_admin_get_feat_resp get_resp;
2638 	u32 tbl_size;
2639 	int i, rc;
2640 
2641 	tbl_size = (1ULL << rss->tbl_log_size) *
2642 		sizeof(struct ena_admin_rss_ind_table_entry);
2643 
2644 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2645 				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2646 				    rss->rss_ind_tbl_dma_addr,
2647 				    tbl_size, 0);
2648 	if (unlikely(rc))
2649 		return rc;
2650 
2651 	if (!ind_tbl)
2652 		return 0;
2653 
2654 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2655 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2656 
2657 	return 0;
2658 }
2659 
2660 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2661 {
2662 	int rc;
2663 
2664 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2665 
2666 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2667 	if (unlikely(rc))
2668 		goto err_indr_tbl;
2669 
2670 	/* The following function might return -EOPNOTSUPP if the device
2671 	 * doesn't support setting the hash key / hash function. We can safely
2672 	 * ignore this error and fall back to indirection table support only.
2673 	 */
2674 	rc = ena_com_hash_key_allocate(ena_dev);
2675 	if (likely(!rc))
2676 		ena_com_hash_key_fill_default_key(ena_dev);
2677 	else if (rc != -EOPNOTSUPP)
2678 		goto err_hash_key;
2679 
2680 	rc = ena_com_hash_ctrl_init(ena_dev);
2681 	if (unlikely(rc))
2682 		goto err_hash_ctrl;
2683 
2684 	return 0;
2685 
2686 err_hash_ctrl:
2687 	ena_com_hash_key_destroy(ena_dev);
2688 err_hash_key:
2689 	ena_com_indirect_table_destroy(ena_dev);
2690 err_indr_tbl:
2691 
2692 	return rc;
2693 }
2694 
2695 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2696 {
2697 	ena_com_indirect_table_destroy(ena_dev);
2698 	ena_com_hash_key_destroy(ena_dev);
2699 	ena_com_hash_ctrl_destroy(ena_dev);
2700 
2701 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2702 }
2703 
2704 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2705 {
2706 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2707 
2708 	host_attr->host_info =
2709 		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2710 				   &host_attr->host_info_dma_addr, GFP_KERNEL);
2711 	if (unlikely(!host_attr->host_info))
2712 		return -ENOMEM;
2713 
2714 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2715 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2716 		(ENA_COMMON_SPEC_VERSION_MINOR));
2717 
2718 	return 0;
2719 }
2720 
2721 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2722 				u32 debug_area_size)
2723 {
2724 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2725 
2726 	host_attr->debug_area_virt_addr =
2727 		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2728 				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
2729 	if (unlikely(!host_attr->debug_area_virt_addr)) {
2730 		host_attr->debug_area_size = 0;
2731 		return -ENOMEM;
2732 	}
2733 
2734 	host_attr->debug_area_size = debug_area_size;
2735 
2736 	return 0;
2737 }
2738 
2739 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2740 {
2741 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2742 
2743 	if (host_attr->host_info) {
2744 		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2745 				  host_attr->host_info_dma_addr);
2746 		host_attr->host_info = NULL;
2747 	}
2748 }
2749 
2750 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2751 {
2752 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2753 
2754 	if (host_attr->debug_area_virt_addr) {
2755 		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2756 				  host_attr->debug_area_virt_addr,
2757 				  host_attr->debug_area_dma_addr);
2758 		host_attr->debug_area_virt_addr = NULL;
2759 	}
2760 }
2761 
2762 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2763 {
2764 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2765 	struct ena_com_admin_queue *admin_queue;
2766 	struct ena_admin_set_feat_cmd cmd;
2767 	struct ena_admin_set_feat_resp resp;
2769 	int ret;
2770 
2771 	/* Host attribute config is called before ena_com_get_dev_attr_feat
2772 	 * so ena_com can't check if the feature is supported.
2773 	 */
2774 
2775 	memset(&cmd, 0x0, sizeof(cmd));
2776 	admin_queue = &ena_dev->admin_queue;
2777 
2778 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2779 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2780 
2781 	ret = ena_com_mem_addr_set(ena_dev,
2782 				   &cmd.u.host_attr.debug_ba,
2783 				   host_attr->debug_area_dma_addr);
2784 	if (unlikely(ret)) {
2785 		pr_err("Memory address set failed\n");
2786 		return ret;
2787 	}
2788 
2789 	ret = ena_com_mem_addr_set(ena_dev,
2790 				   &cmd.u.host_attr.os_info_ba,
2791 				   host_attr->host_info_dma_addr);
2792 	if (unlikely(ret)) {
2793 		pr_err("Memory address set failed\n");
2794 		return ret;
2795 	}
2796 
2797 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2798 
2799 	ret = ena_com_execute_admin_command(admin_queue,
2800 					    (struct ena_admin_aq_entry *)&cmd,
2801 					    sizeof(cmd),
2802 					    (struct ena_admin_acq_entry *)&resp,
2803 					    sizeof(resp));
2804 
2805 	if (unlikely(ret))
2806 		pr_err("Failed to set host attributes: %d\n", ret);
2807 
2808 	return ret;
2809 }
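
/* Usage sketch (illustrative only; "debug_area_size" is a caller-chosen size):
 * host info and the debug area are allocated first, the caller fills the newly
 * allocated host_info fields, and the result is published to the device with a
 * single set-feature command; on failure the allocations are rolled back:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *	if (rc)
 *		goto err_host_info;
 *
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		goto err_debug_area;
 *
 *	return 0;
 *
 * err_debug_area:
 *	ena_com_delete_debug_area(ena_dev);
 * err_host_info:
 *	ena_com_delete_host_info(ena_dev);
 *	return rc;
 */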
2810 
2811 /* Interrupt moderation */
2812 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2813 {
2814 	return ena_com_check_supported_feature_id(ena_dev,
2815 						  ENA_ADMIN_INTERRUPT_MODERATION);
2816 }
2817 
2818 static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2819 							  u32 intr_delay_resolution,
2820 							  u32 *intr_moder_interval)
2821 {
2822 	if (!intr_delay_resolution) {
2823 		pr_err("Illegal interrupt delay granularity value\n");
2824 		return -EFAULT;
2825 	}
2826 
2827 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2828 
2829 	return 0;
2830 }
2831 
2832 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2833 						      u32 tx_coalesce_usecs)
2834 {
2835 	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2836 							      ena_dev->intr_delay_resolution,
2837 							      &ena_dev->intr_moder_tx_interval);
2838 }
2839 
2840 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2841 						      u32 rx_coalesce_usecs)
2842 {
2843 	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2844 							      ena_dev->intr_delay_resolution,
2845 							      &ena_dev->intr_moder_rx_interval);
2846 }
2847 
2848 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2849 {
2850 	struct ena_admin_get_feat_resp get_resp;
2851 	u16 delay_resolution;
2852 	int rc;
2853 
2854 	rc = ena_com_get_feature(ena_dev, &get_resp,
2855 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2856 
2857 	if (rc) {
2858 		if (rc == -EOPNOTSUPP) {
2859 			pr_debug("Feature %d isn't supported\n",
2860 				 ENA_ADMIN_INTERRUPT_MODERATION);
2861 			rc = 0;
2862 		} else {
2863 			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2864 			       rc);
2865 		}
2866 
2867 		/* no moderation supported, disable adaptive support */
2868 		ena_com_disable_adaptive_moderation(ena_dev);
2869 		return rc;
2870 	}
2871 
2872 	/* Moderation is supported by the device; record its delay resolution */
2873 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2874 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2875 
2876 	/* Disable adaptive moderation by default - can be enabled later */
2877 	ena_com_disable_adaptive_moderation(ena_dev);
2878 
2879 	return 0;
2880 }
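
/* Usage sketch (illustrative only; "tx_usecs" and "rx_usecs" are caller
 * inputs, e.g. from ethtool coalescing parameters): once
 * ena_com_init_interrupt_moderation() has fetched the device delay resolution,
 * microsecond values can be converted and stored with the non-adaptive
 * helpers:
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
 *								tx_usecs);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
 *								rx_usecs);
 */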
2881 
2882 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2883 {
2884 	return ena_dev->intr_moder_tx_interval;
2885 }
2886 
2887 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2888 {
2889 	return ena_dev->intr_moder_rx_interval;
2890 }
2891 
2892 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2893 			    struct ena_admin_feature_llq_desc *llq_features,
2894 			    struct ena_llq_configurations *llq_default_cfg)
2895 {
2896 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2897 	int rc;
2898 
2899 	if (!llq_features->max_llq_num) {
2900 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2901 		return 0;
2902 	}
2903 
2904 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2905 	if (rc)
2906 		return rc;
2907 
2908 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2909 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2910 
2911 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
2912 		pr_err("The size of the LLQ entry is smaller than needed\n");
2913 		return -EINVAL;
2914 	}
2915 
2916 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2917 
2918 	return 0;
2919 }
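
/* Usage sketch (illustrative only; "llq_cfg" is a caller-built
 * ena_llq_configurations holding the driver's preferred LLQ settings and
 * "get_feat_ctx" was filled by ena_com_get_dev_attr_feat()): the LLQ feature
 * descriptor decides whether TX descriptors are placed in host memory or in
 * the device (LLQ):
 *
 *	rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_cfg);
 *	if (rc)
 *		return rc;
 *
 *	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
 *		pr_info("LLQ not available, using host-memory TX queues\n");
 */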
2920