// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

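/* Device registers are big-endian, so all BAR0 accesses go through these
 * helpers, which byte-swap as needed on little-endian hosts.
 */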
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

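/* Producer and consumer counters are free-running 16-bit values. Since the
 * queue size is a power of two, "counter & (count - 1)" yields the ring
 * index, and a difference equal to the queue size means the ring is full.
 * E.g. with count == 8, producer == 9 and consumer == 1 map to index 1
 * with a fill level of 8 (full).
 */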
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

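/* Ownership of CQ/EQ elements alternates between hardware and software:
 * the device is expected to toggle the owner bit it writes on each full
 * pass over the ring. Because count is a power of two, bit "count" of the
 * free-running consumer counter flips with the same period, so an element
 * whose owner bit disagrees with that phase bit is still hardware-owned.
 */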
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

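/* Doorbell conventions: the producer doorbell publishes new descriptors to
 * the device, the consumer doorbell returns processed CQ/EQ elements, and
 * the arm doorbell re-enables event generation for a queue. The wmb() in
 * each helper orders descriptor memory writes before the MMIO doorbell
 * write that makes them visible to the device.
 */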
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;

	/* Set the CQ with the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set the CQ with the same number as this RDQ, using a base
	 * offset above the SDQ count, as the lower CQs are assigned
	 * to the SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

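/* CQE versions differ in layout and size. CQEv2 carries extra receive
 * metadata (for example, the mirror reason and user-defined values read in
 * mlxsw_pci_cqe_rdq_handle()) that is only useful on receive; transmit
 * completions gain nothing from it, hence the downgrade to CQEv1 for the
 * CQs that serve the SDQs.
 */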
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char *wqe;
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
	}

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

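/* The tasklet copies each CQE to a local buffer and rings the consumer
 * doorbell right away, handing the ring element back to the device before
 * the completion is processed. The credits cap (half the queue size)
 * bounds the work done per run; re-arming at the end is expected to raise
 * a fresh event for any CQEs that are still pending.
 */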
static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received
		 * on queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and CQ completion
		 * events are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

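/* Per-type queue operations. CQs resolve their element count and size
 * through callbacks (elem_count_f/elem_size_f) because both depend on the
 * CQE version chosen in pre_init at runtime; the other queue types use the
 * static elem_count/elem_size values.
 */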
struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

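/* Each queue is backed by a single MLXSW_PCI_AQ_SIZE coherent DMA
 * allocation, handed to the device as MLXSW_PCI_AQ_PAGES page-sized chunks
 * via the sw2hw mailbox in the type-specific init callback.
 */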
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize the elem_info entries with pointers to the
	 * DMA-mapped elements for easy access later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

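/* Queue groups are brought up in dependency order: EQs first, then the CQs
 * that report to EQ1, then the SDQs and RDQs that complete into those CQs.
 * Teardown in mlxsw_pci_aqs_fini() runs in reverse.
 */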
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* Until now the command interface had to be polled; with the event
	 * queues up, command completions can be delivered as events instead.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
								mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}
	if (profile->used_kvh_xlt_cache_mode) {
		mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
			mbox, profile->kvh_xlt_cache_mode);
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
					  struct mlxsw_bus_info *bus_info,
					  char *mbox)
{
	int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
	int i;

	if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
		return 0;

	bus_info->xm_exists = true;

	if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
		dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
		return -EINVAL;
	}
	bus_info->xm_local_ports_count = count;
	for (i = 0; i < count; i++)
		bus_info->xm_local_ports[i] =
			mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
									 i);
	return 0;
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);

	return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
}

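/* The firmware area is a set of page-sized coherent DMA buffers handed to
 * the device for its own use. QUERY_FW reports how many pages are needed;
 * they are mapped with MAP_FA commands in batches of up to
 * MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries per mailbox.
 */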
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

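/* A single MSI-X vector serves the whole device: the hard IRQ handler only
 * schedules the tasklet of every EQ, and the EQ tasklets then figure out
 * which events are actually pending.
 */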
static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

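/* Software reset is requested through the MRSR register; the FW_READY
 * register is polled before and after to make sure the device is
 * responsive going in and has come back up afterwards.
 */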
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

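/* SDQ 0 is dedicated to EMAD (register access) traffic so that device
 * management is not queued behind bulk transmit; other packets are spread
 * over the remaining control SDQs by local port number.
 */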
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set the byte count of unused SG entries to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

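/* Command interface execution. The input/output mailboxes live in coherent
 * DMA memory; their bus addresses are written to the CIR in/out parameter
 * registers and the GO bit starts execution. Before the event queues exist
 * (cmd.nopoll == false) completion is detected by polling the GO bit;
 * afterwards the command EQ delivers a completion event that wakes the
 * waiter.
 */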
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_frc_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");