// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"

#define HINIC_EQS_WQ_NAME                       "hinic_eqs"

#define GET_EQ_NUM_PAGES(eq, pg_size)           \
		(ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))

#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size)     ((pg_size) / (eq)->elem_size)

#define EQ_CONS_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))

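/*
 * Locate element 'idx' in the multi-page queue: the high bits of the index
 * select the page, the low bits (num_elem_in_pg is a power of 2, checked in
 * init_eq) give the offset of the element within that page.
 */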
#define GET_EQ_ELEMENT(eq, idx)         \
		((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
		 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx)           ((struct hinic_aeq_elem *) \
					GET_EQ_ELEMENT(eq, idx))

#define GET_CEQ_ELEM(eq, idx)           ((u32 *) \
					 GET_EQ_ELEMENT(eq, idx))

#define GET_CURR_AEQ_ELEM(eq)           GET_AEQ_ELEM(eq, (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq)           GET_CEQ_ELEM(eq, (eq)->cons_idx)

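/* HW encodes the page size as ilog2(page_size / 4K) and the element size
 * as ilog2(elem_size / 32B); the helpers below compute these values.
 */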
#define PAGE_IN_4K(page_size)           ((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq)     (ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)         (((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)     (ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define EQ_MAX_PAGES                    8

#define CEQE_TYPE_SHIFT                 23
#define CEQE_TYPE_MASK                  0x7

#define CEQE_TYPE(ceqe)                 (((ceqe) >> CEQE_TYPE_SHIFT) &  \
					 CEQE_TYPE_MASK)

#define CEQE_DATA_MASK                  0x3FFFFFF
#define CEQE_DATA(ceqe)                 ((ceqe) & CEQE_DATA_MASK)

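/*
 * An eq is embedded in the aeq[]/ceq[] array of its parent structure, so
 * stepping the eq pointer back by q_id lands on element 0 and container_of()
 * then recovers the parent hinic_aeqs/hinic_ceqs.
 */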
#define aeq_to_aeqs(eq)                 \
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

#define ceq_to_ceqs(eq)                 \
		container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])

#define work_to_aeq_work(work)          \
		container_of(work, struct hinic_eq_work, work)

#define DMA_ATTR_AEQ_DEFAULT            0
#define DMA_ATTR_CEQ_DEFAULT            0

/* No coalescence */
#define THRESH_CEQ_DEFAULT              0

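/* The driver programs the EQs in armed interrupt mode (see set_ctrl0) and
 * re-arms an EQ by setting the INT_ARMED bit of the consumer index register
 * in eq_update_ci().
 */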
enum eq_int_mode {
	EQ_INT_MODE_ARMED,
	EQ_INT_MODE_ALWAYS
};

enum eq_arm_state {
	EQ_NOT_ARMED,
	EQ_ARMED
};

/**
 * hinic_aeq_register_hw_cb - register an AEQ callback for a specific event
 * @aeqs: pointer to Async eqs of the chip
 * @event: aeq event to register the callback for
 * @handle: private data to be passed to the callback
 * @hwe_handler: callback function
 **/
void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
			      enum hinic_aeq_type event, void *handle,
			      void (*hwe_handler)(void *handle, void *data,
						  u8 size))
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_handler = hwe_handler;
	hwe_cb->handle = handle;
	hwe_cb->hwe_state = HINIC_EQE_ENABLED;
}
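
/*
 * Illustrative usage (a sketch, not code from this file): a consumer such as
 * the management-message layer registers a handler for its AEQ event type,
 * e.g.:
 *	hinic_aeq_register_hw_cb(aeqs, HINIC_MSG_FROM_MGMT_CPU,
 *				 ctx, handler);
 * and later removes it with hinic_aeq_unregister_hw_cb() for the same event.
 * Here "ctx" and "handler" are hypothetical names used only for illustration.
 */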

/**
 * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for a specific event
 * @aeqs: pointer to Async eqs of the chip
 * @event: aeq event to unregister the callback for
 **/
void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
				enum hinic_aeq_type event)
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;

	while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
		schedule();

	hwe_cb->hwe_handler = NULL;
}

/**
 * hinic_ceq_register_cb - register a CEQ callback for a specific event
 * @ceqs: pointer to Completion eqs part of the chip
 * @event: ceq event to register the callback for
 * @handle: private data to be passed to the callback
 * @handler: callback function
 **/
void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
			   enum hinic_ceq_type event, void *handle,
			   void (*handler)(void *handle, u32 ceqe_data))
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->handler = handler;
	ceq_cb->handle = handle;
	ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
}

/**
 * hinic_ceq_unregister_cb - unregister the CEQ callback for a specific event
 * @ceqs: pointer to Completion eqs part of the chip
 * @event: ceq event to unregister the callback for
 **/
void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
			     enum hinic_ceq_type event)
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;

	while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
		schedule();

	ceq_cb->handler = NULL;
}

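/* Compute the 4-bit XOR checksum over the eight nibbles of the consumer
 * index register value; eq_update_ci() stores it in the XOR_CHKSUM field.
 */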
static u8 eq_cons_idx_checksum_set(u32 val)
{
	u8 checksum = 0;
	int idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= ((val >> idx) & 0xF);

	return (checksum & 0xF);
}

/**
 * eq_update_ci - update the HW cons idx of the event queue
 * @eq: the event queue to update the cons idx for
 * @arm_state: whether to re-arm the event queue interrupt
 **/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
	u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);

	/* Read Modify Write */
	val = hinic_hwif_read_reg(eq->hwif, addr);

	val = HINIC_EQ_CI_CLEAR(val, IDX)       &
	      HINIC_EQ_CI_CLEAR(val, WRAPPED)   &
	      HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
	      HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);

	val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX)    |
	       HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
	       HINIC_EQ_CI_SET(arm_state, INT_ARMED);

	val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

	hinic_hwif_write_reg(eq->hwif, addr, val);
}

/**
 * aeq_irq_handler - handler for the AEQ event
 * @eq: the Async Event Queue that received the event
 **/
static void aeq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
	struct hinic_hwif *hwif = aeqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_aeq_elem *aeqe_curr;
	struct hinic_hw_event_cb *hwe_cb;
	enum hinic_aeq_type event;
	unsigned long eqe_state;
	u32 aeqe_desc;
	int i, size;

	for (i = 0; i < eq->q_len; i++) {
		aeqe_curr = GET_CURR_AEQ_ELEM(eq);

		/* Data in HW is in big endian format */
		aeqe_desc = be32_to_cpu(aeqe_curr->desc);

		/* HW toggles the wrapped bit when it adds an eq element */
		if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
			break;

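		/* Ensure the element data is only read after the ownership
		 * (wrapped bit) check above has observed a valid descriptor.
		 */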
		dma_rmb();

		event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
		if (event >= HINIC_MAX_AEQ_EVENTS) {
			dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
			return;
		}

		if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
			hwe_cb = &aeqs->hwe_cb[event];

			size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);

			eqe_state = cmpxchg(&hwe_cb->hwe_state,
					    HINIC_EQE_ENABLED,
					    HINIC_EQE_ENABLED |
					    HINIC_EQE_RUNNING);
			if ((eqe_state == HINIC_EQE_ENABLED) &&
			    (hwe_cb->hwe_handler))
				hwe_cb->hwe_handler(hwe_cb->handle,
						    aeqe_curr->data, size);
			else
				dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
					event);

			hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
		}

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * ceq_event_handler - handler for the ceq events
 * @ceqs: ceqs part of the chip
 * @ceqe: ceq element that describes the event
 **/
static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
{
	struct hinic_hwif *hwif = ceqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_ceq_cb *ceq_cb;
	enum hinic_ceq_type event;
	unsigned long eqe_state;

	event = CEQE_TYPE(ceqe);
	if (event >= HINIC_MAX_CEQ_EVENTS) {
		dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
		return;
	}

	ceq_cb = &ceqs->ceq_cb[event];

	eqe_state = cmpxchg(&ceq_cb->ceqe_state,
			    HINIC_EQE_ENABLED,
			    HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);

	if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
		ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
	else
		dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);

	ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
}

/**
 * ceq_irq_handler - handler for the CEQ event
 * @eq: the Completion Event Queue that received the event
 **/
static void ceq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
	u32 ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = *(GET_CURR_CEQ_ELEM(eq));

		/* Data in HW is in big endian format */
		ceqe = be32_to_cpu(ceqe);

		/* HW toggles the wrapped bit when it adds an eq element */
		if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			break;

		ceq_event_handler(ceqs, ceqe);

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * eq_irq_handler - handler for the EQ event
 * @data: the Event Queue that received the event
 **/
static void eq_irq_handler(void *data)
{
	struct hinic_eq *eq = data;

	if (eq->type == HINIC_AEQ)
		aeq_irq_handler(eq);
	else if (eq->type == HINIC_CEQ)
		ceq_irq_handler(eq);

	eq_update_ci(eq, EQ_ARMED);
}

/**
 * eq_irq_work - the work of the EQ that received the event
 * @work: the work struct that is associated with the EQ
 **/
static void eq_irq_work(struct work_struct *work)
{
	struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
	struct hinic_eq *aeq;

	aeq = aeq_work->data;
	eq_irq_handler(aeq);
}

/**
 * ceq_tasklet - the tasklet of the EQ that received the event
 * @ceq_data: the eq
 **/
static void ceq_tasklet(unsigned long ceq_data)
{
	struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;

	eq_irq_handler(ceq);
}

/**
 * aeq_interrupt - aeq interrupt handler
 * @irq: irq number
 * @data: the Async Event Queue that collected the event
 **/
static irqreturn_t aeq_interrupt(int irq, void *data)
{
	struct hinic_eq_work *aeq_work;
	struct hinic_eq *aeq = data;
	struct hinic_aeqs *aeqs;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);

	aeq_work = &aeq->aeq_work;
	aeq_work->data = aeq;

	aeqs = aeq_to_aeqs(aeq);
	queue_work(aeqs->workq, &aeq_work->work);

	return IRQ_HANDLED;
}

/**
 * ceq_interrupt - ceq interrupt handler
 * @irq: irq number
 * @data: the Completion Event Queue that collected the event
 **/
static irqreturn_t ceq_interrupt(int irq, void *data)
{
	struct hinic_eq *ceq = data;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);

	tasklet_schedule(&ceq->ceq_tasklet);

	return IRQ_HANDLED;
}

static void set_ctrl0(struct hinic_eq *eq)
{
	struct msix_entry *msix_entry = &eq->msix_entry;
	enum hinic_eq_type type = eq->type;
	u32 addr, val, ctrl0;

	if (type == HINIC_AEQ) {
		/* RMW Ctrl0 */
		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX)      &
		      HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR)     &
		      HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		      HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);

		ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX)     |
			HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
			HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX)                   |
			HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);

		val |= ctrl0;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	} else if (type == HINIC_CEQ) {
		/* RMW Ctrl0 */
		addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX)     &
		      HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR)     &
		      HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH)  &
		      HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		      HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);

		ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX)     |
			HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR)  |
			HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
			HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX)                    |
			HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);

		val |= ctrl0;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	}
}

static void set_ctrl1(struct hinic_eq *eq)
{
	enum hinic_eq_type type = eq->type;
	u32 page_size_val, elem_size;
	u32 addr, val, ctrl1;

	if (type == HINIC_AEQ) {
		/* RMW Ctrl1 */
		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN)          &
		      HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE)    &
		      HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN)            |
			HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)      |
			HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	} else if (type == HINIC_CEQ) {
		/* RMW Ctrl1 */
		addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
		      HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
			HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	}
}

/**
 * set_eq_ctrls - set the ctrl registers of the eq
 * @eq: the Event Queue to configure
 **/
static void set_eq_ctrls(struct hinic_eq *eq)
{
	set_ctrl0(eq);
	set_ctrl1(eq);
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the Async Event Queue
 * @init_val: value to initialize the elements with
 **/
static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	struct hinic_aeq_elem *aeqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb();  /* Write the initialization values */
}

/**
 * ceq_elements_init - initialize all the elements in the ceq
 * @eq: the event queue
 * @init_val: value to initialize the elements with
 **/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	u32 *ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = GET_CEQ_ELEM(eq, i);
		*(ceqe) = cpu_to_be32(init_val);
	}

	wmb();  /* Write the initialization values */
}

/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int alloc_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u32 init_val, addr, val;
	size_t addr_size;
	int err, pg;

	addr_size = eq->num_pages * sizeof(*eq->dma_addr);
	eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->dma_addr)
		return -ENOMEM;

	addr_size = eq->num_pages * sizeof(*eq->virt_addr);
	eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->virt_addr) {
		err = -ENOMEM;
		goto err_virt_addr_alloc;
	}

	for (pg = 0; pg < eq->num_pages; pg++) {
		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
						       eq->page_size,
						       &eq->dma_addr[pg],
						       GFP_KERNEL);
		if (!eq->virt_addr[pg]) {
			err = -ENOMEM;
			goto err_dma_alloc;
		}

		addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
		val = upper_32_bits(eq->dma_addr[pg]);

		hinic_hwif_write_reg(hwif, addr, val);

		addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
		val = lower_32_bits(eq->dma_addr[pg]);

		hinic_hwif_write_reg(hwif, addr, val);
	}

	init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);

	if (eq->type == HINIC_AEQ)
		aeq_elements_init(eq, init_val);
	else if (eq->type == HINIC_CEQ)
		ceq_elements_init(eq, init_val);

	return 0;

err_dma_alloc:
	while (--pg >= 0)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);

err_virt_addr_alloc:
	devm_kfree(&pdev->dev, eq->dma_addr);
	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the Event Queue
 **/
static void free_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int pg;

	for (pg = 0; pg < eq->num_pages; pg++)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);
	devm_kfree(&pdev->dev, eq->dma_addr);
}

/**
 * init_eq - initialize Event Queue
 * @eq: the event queue
 * @hwif: the HW interface of a PCI function device
 * @type: the type of the event queue, aeq or ceq
 * @q_id: Queue id number
 * @q_len: the number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @entry: msix entry associated with the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
		   enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
		   struct msix_entry entry)
{
	struct pci_dev *pdev = hwif->pdev;
	int err;

	eq->hwif = hwif;
	eq->type = type;
	eq->q_id = q_id;
	eq->q_len = q_len;
	eq->page_size = page_size;

	/* Clear PI and CI, also clear the ARM bit */
	hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
	hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	if (type == HINIC_AEQ) {
		eq->elem_size = HINIC_AEQE_SIZE;
	} else if (type == HINIC_CEQ) {
		eq->elem_size = HINIC_CEQE_SIZE;
	} else {
		dev_err(&pdev->dev, "Invalid EQ type\n");
		return -EINVAL;
	}

	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);

	eq->msix_entry = entry;

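	/* GET_EQ_ELEMENT() masks the index with (num_elem_in_pg - 1), so the
	 * number of elements per page must be a power of 2.
	 */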
	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
		dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
		return -EINVAL;
	}

	if (eq->num_pages > EQ_MAX_PAGES) {
		dev_err(&pdev->dev, "too many pages for eq\n");
		return -EINVAL;
	}

	set_eq_ctrls(eq);
	eq_update_ci(eq, EQ_ARMED);

	err = alloc_eq_pages(eq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
		return err;
	}

	if (type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		INIT_WORK(&aeq_work->work, eq_irq_work);
	} else if (type == HINIC_CEQ) {
		tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
			     (unsigned long)eq);
	}

	/* set the attributes of the msix entry */
	hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
			    HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);

	if (type == HINIC_AEQ)
		err = request_irq(entry.vector, aeq_interrupt, 0,
				  "hinic_aeq", eq);
	else if (type == HINIC_CEQ)
		err = request_irq(entry.vector, ceq_interrupt, 0,
				  "hinic_ceq", eq);

	if (err) {
		dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
		goto err_req_irq;
	}

	return 0;

err_req_irq:
	free_eq_pages(eq);
	return err;
}

/**
 * remove_eq - remove Event Queue
 * @eq: the event queue
 **/
static void remove_eq(struct hinic_eq *eq)
{
	hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
			     HINIC_MSIX_DISABLE);
	free_irq(eq->msix_entry.vector, eq);

	if (eq->type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		cancel_work_sync(&aeq_work->work);
		/* clear aeq_len to avoid hw access host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
	} else if (eq->type == HINIC_CEQ) {
		tasklet_kill(&eq->ceq_tasklet);
		/* clear ceq_len to avoid hw access host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
	}

	/* update cons_idx to avoid invalid interrupt */
	eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
	eq_update_ci(eq, EQ_NOT_ARMED);

	free_eq_pages(eq);
}

/**
 * hinic_aeqs_init - initialize all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 * @hwif: the HW interface of a PCI function device
 * @num_aeqs: number of AEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
		    int num_aeqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int err, i, q_id;

	aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
	if (!aeqs->workq)
		return -ENOMEM;

	aeqs->hwif = hwif;
	aeqs->num_aeqs = num_aeqs;

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
			goto err_init_aeq;
		}
	}

	return 0;

err_init_aeq:
	for (i = 0; i < q_id; i++)
		remove_eq(&aeqs->aeq[i]);

	destroy_workqueue(aeqs->workq);
	return err;
}

/**
 * hinic_aeqs_free - free all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 **/
void hinic_aeqs_free(struct hinic_aeqs *aeqs)
{
	int q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);

	destroy_workqueue(aeqs->workq);
}

/**
 * hinic_ceqs_init - init all the ceqs
 * @ceqs: ceqs part of the chip
 * @hwif: the hardware interface of a pci function device
 * @num_ceqs: number of CEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, Negative - Failure
 **/
int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
		    int num_ceqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int i, q_id, err;

	ceqs->hwif = hwif;
	ceqs->num_ceqs = num_ceqs;

	for (q_id = 0; q_id < num_ceqs; q_id++) {
		err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
			goto err_init_ceq;
		}
	}

	return 0;

err_init_ceq:
	for (i = 0; i < q_id; i++)
		remove_eq(&ceqs->ceq[i]);

	return err;
}

/**
 * hinic_ceqs_free - free all the ceqs
 * @ceqs: ceqs part of the chip
 **/
void hinic_ceqs_free(struct hinic_ceqs *ceqs)
{
	int q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
		remove_eq(&ceqs->ceq[q_id]);
}