// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"

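/*
 * Build the full queue topology: one EQ per configured interrupt vector,
 * a single MQ (with its own CQ) on the first EQ, a WQ (with its own CQ)
 * per EQ, then a CQ set spanning all EQs and a multi-RQ set on top of it.
 * Any allocation failure tears down everything created so far.
 */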
int
efct_hw_init_queues(struct efct_hw *hw)
{
	struct hw_eq *eq = NULL;
	struct hw_cq *cq = NULL;
	struct hw_wq *wq = NULL;
	struct hw_mq *mq = NULL;

	struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
	u32 i = 0, j;

	hw->eq_count = 0;
	hw->cq_count = 0;
	hw->mq_count = 0;
	hw->wq_count = 0;
	hw->rq_count = 0;
	hw->hw_rq_count = 0;
	INIT_LIST_HEAD(&hw->eq_list);

	for (i = 0; i < hw->config.n_eq; i++) {
		/* Create EQ */
		eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
		if (!eq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		eqs[i] = eq;

		/* Create one MQ */
		if (!i) {
			cq = efct_hw_new_cq(eq,
					    hw->num_qentries[SLI4_QTYPE_CQ]);
			if (!cq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}

			mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
			if (!mq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}
		}

		/* Create WQ */
		cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
		if (!cq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
		if (!wq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}
	}

	/* Create CQ set */
	if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	/* Create RQ set */
	if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	for (j = 0; j < i; j++) {
		rqs[j]->filter_mask = 0;
		rqs[j]->is_mrq = true;
		rqs[j]->base_mrq_id = rqs[0]->hdr->id;
	}

	hw->hw_mrq_count = i;

	return 0;
}

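/*
 * Map each present CPU to a work queue, based on the MSI-X affinity of the
 * corresponding EQ interrupt vector, so that submissions stay local to the
 * CPU that will also service the completions.
 */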
int
efct_hw_map_wq_cpu(struct efct_hw *hw)
{
	struct efct *efct = hw->os;
	u32 cpu = 0, i;

	/* Init cpu_map array */
	hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
				   GFP_KERNEL);
	if (!hw->wq_cpu_array)
		return -ENOMEM;

	for (i = 0; i < hw->config.n_eq; i++) {
		const struct cpumask *maskp;

		/* Get a CPU mask for all CPUs affinitized to this vector */
		maskp = pci_irq_get_affinity(efct->pci, i);
		if (!maskp) {
			efc_log_debug(efct, "maskp null for vector:%d\n", i);
			continue;
		}

		/* Loop through all CPUs associated with vector idx */
		for_each_cpu_and(cpu, maskp, cpu_present_mask) {
			efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
			hw->wq_cpu_array[cpu] = hw->hw_wq[i];
		}
	}

	return 0;
}

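/*
 * Allocate and initialize one event queue: assign the next EQ instance,
 * allocate the underlying SLI4 queue, program the EQ interrupt delay,
 * and add the EQ to the HW EQ list.
 */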
struct hw_eq *
efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
{
	struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

	if (!eq)
		return NULL;

	eq->type = SLI4_QTYPE_EQ;
	eq->hw = hw;
	eq->entry_count = entry_count;
	eq->instance = hw->eq_count++;
	eq->queue = &hw->eq[eq->instance];
	INIT_LIST_HEAD(&eq->cq_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
			    NULL)) {
		efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
		kfree(eq);
		return NULL;
	}

	sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
	hw->hw_eq[eq->instance] = eq;
	INIT_LIST_HEAD(&eq->list_entry);
	list_add_tail(&eq->list_entry, &hw->eq_list);
	efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
		      eq->queue->id, eq->entry_count);
	return eq;
}

struct hw_cq *
efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
{
	struct efct_hw *hw = eq->hw;
	struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);

	if (!cq)
		return NULL;

	cq->eq = eq;
	cq->type = SLI4_QTYPE_CQ;
	cq->instance = eq->hw->cq_count++;
	cq->entry_count = entry_count;
	cq->queue = &hw->cq[cq->instance];

	INIT_LIST_HEAD(&cq->q_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
			    cq->entry_count, eq->queue)) {
		efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
			    eq->instance, eq->entry_count);
		kfree(cq);
		return NULL;
	}

	hw->hw_cq[cq->instance] = cq;
	INIT_LIST_HEAD(&cq->list_entry);
	list_add_tail(&cq->list_entry, &eq->cq_list);
	efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
		      cq->queue->id, cq->entry_count);
	return cq;
}

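/*
 * Allocate a set of completion queues, one per EQ, created with a single
 * SLI4 CQ set command. On any failure the partially built set is freed and
 * all returned CQ pointers are left NULL.
 */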
u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
		   u32 num_cqs, u32 entry_count)
{
	u32 i;
	struct efct_hw *hw = eqs[0]->hw;
	struct sli4 *sli4 = &hw->sli;
	struct hw_cq *cq = NULL;
	struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
	struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT];

	/* Initialise CQS pointers to NULL */
	for (i = 0; i < num_cqs; i++)
		cqs[i] = NULL;

	for (i = 0; i < num_cqs; i++) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq)
			goto error;

		cqs[i] = cq;
		cq->eq = eqs[i];
		cq->type = SLI4_QTYPE_CQ;
		cq->instance = hw->cq_count++;
		cq->entry_count = entry_count;
		cq->queue = &hw->cq[cq->instance];
		qs[i] = cq->queue;
		assefct[i] = eqs[i]->queue;
		INIT_LIST_HEAD(&cq->q_list);
	}

	if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {
		efc_log_err(hw->os, "Failed to create CQ Set.\n");
		goto error;
	}

	for (i = 0; i < num_cqs; i++) {
		hw->hw_cq[cqs[i]->instance] = cqs[i];
		INIT_LIST_HEAD(&cqs[i]->list_entry);
		list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
	}

	return 0;

error:
	for (i = 0; i < num_cqs; i++) {
		kfree(cqs[i]);
		cqs[i] = NULL;
	}
	return -EIO;
}

struct hw_mq *
efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	mq->cq = cq;
	mq->type = SLI4_QTYPE_MQ;
	mq->instance = cq->eq->hw->mq_count++;
	mq->entry_count = entry_count;
	mq->entry_size = EFCT_HW_MQ_DEPTH;
	mq->queue = &hw->mq[mq->instance];

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
			    cq->queue)) {
		efc_log_err(hw->os, "MQ allocation failure\n");
		kfree(mq);
		return NULL;
	}

	hw->hw_mq[mq->instance] = mq;
	INIT_LIST_HEAD(&mq->list_entry);
	list_add_tail(&mq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
		      mq->queue->id, mq->entry_count);
	return mq;
}

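/*
 * Allocate a work queue bound to the given CQ: one WQ entry is held in
 * reserve (free_count = entry_count - 1), the WQE completion (WQEC)
 * counters are seeded, and the pending-submission list is initialized.
 */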
struct hw_wq *
efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return NULL;

	wq->hw = cq->eq->hw;
	wq->cq = cq;
	wq->type = SLI4_QTYPE_WQ;
	wq->instance = cq->eq->hw->wq_count++;
	wq->entry_count = entry_count;
	wq->queue = &hw->wq[wq->instance];
	wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
	wq->wqec_count = wq->wqec_set_count;
	wq->free_count = wq->entry_count - 1;
	INIT_LIST_HEAD(&wq->pending_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
			    wq->entry_count, cq->queue)) {
		efc_log_err(hw->os, "WQ allocation failure\n");
		kfree(wq);
		return NULL;
	}

	hw->hw_wq[wq->instance] = wq;
	INIT_LIST_HEAD(&wq->list_entry);
	list_add_tail(&wq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
		      wq->instance, wq->queue->id, wq->entry_count, wq->class);
	return wq;
}

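/*
 * Allocate a set of receive queue pairs, one pair (header RQ + data RQ)
 * per CQ, registered with a single SLI4 RQ set command. A per-RQ tracker
 * array records the efc_hw_sequence posted at each ring index.
 */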
u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
		   u32 num_rq_pairs, u32 entry_count)
{
	struct efct_hw *hw = cqs[0]->eq->hw;
	struct hw_rq *rq = NULL;
	struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
	u32 i, q_count, size;

	/* Initialise RQS pointers */
	for (i = 0; i < num_rq_pairs; i++)
		rqs[i] = NULL;

	/*
	 * Allocate an RQ object SET, where each element in set
	 * encapsulates 2 SLI queues (for rq pair)
	 */
	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
		rq = kzalloc(sizeof(*rq), GFP_KERNEL);
		if (!rq)
			goto error;

		rqs[i] = rq;
		rq->instance = hw->hw_rq_count++;
		rq->cq = cqs[i];
		rq->type = SLI4_QTYPE_RQ;
		rq->entry_count = entry_count;

		/* Header RQ */
		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count] = rq->hdr;

		/* Data RQ */
		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count + 1] = rq->data;

		rq->rq_tracker = NULL;
	}

	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
				cqs[0]->queue->id,
				rqs[0]->entry_count,
				rqs[0]->hdr_entry_size,
				rqs[0]->data_entry_size)) {
		efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
			    cqs[0]->queue->id);
		goto error;
	}

	for (i = 0; i < num_rq_pairs; i++) {
		hw->hw_rq[rqs[i]->instance] = rqs[i];
		INIT_LIST_HEAD(&rqs[i]->list_entry);
		list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
		size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
		rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
		if (!rqs[i]->rq_tracker)
			goto error;
	}

	return 0;

error:
	for (i = 0; i < num_rq_pairs; i++) {
		if (rqs[i]) {
			kfree(rqs[i]->rq_tracker);
			kfree(rqs[i]);
		}
	}

	return -EIO;
}

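/*
 * Queue deletion cascades down the topology: deleting an EQ deletes every
 * CQ on its cq_list, and deleting a CQ deletes every MQ, WQ and RQ on its
 * q_list before the queue object itself is unlinked and freed.
 */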
void
efct_hw_del_eq(struct hw_eq *eq)
{
	struct hw_cq *cq;
	struct hw_cq *cq_next;

	if (!eq)
		return;

	list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
		efct_hw_del_cq(cq);
	list_del(&eq->list_entry);
	eq->hw->hw_eq[eq->instance] = NULL;
	kfree(eq);
}

void
efct_hw_del_cq(struct hw_cq *cq)
{
	struct hw_q *q;
	struct hw_q *q_next;

	if (!cq)
		return;

	list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
		switch (q->type) {
		case SLI4_QTYPE_MQ:
			efct_hw_del_mq((struct hw_mq *)q);
			break;
		case SLI4_QTYPE_WQ:
			efct_hw_del_wq((struct hw_wq *)q);
			break;
		case SLI4_QTYPE_RQ:
			efct_hw_del_rq((struct hw_rq *)q);
			break;
		default:
			break;
		}
	}
	list_del(&cq->list_entry);
	cq->eq->hw->hw_cq[cq->instance] = NULL;
	kfree(cq);
}

void
efct_hw_del_mq(struct hw_mq *mq)
{
	if (!mq)
		return;

	list_del(&mq->list_entry);
	mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
	kfree(mq);
}

void
efct_hw_del_wq(struct hw_wq *wq)
{
	if (!wq)
		return;

	list_del(&wq->list_entry);
	wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
	kfree(wq);
}

void
efct_hw_del_rq(struct hw_rq *rq)
{
	struct efct_hw *hw = NULL;

	if (!rq)
		return;
	/* Free RQ tracker */
	kfree(rq->rq_tracker);
	rq->rq_tracker = NULL;
	list_del(&rq->list_entry);
	hw = rq->cq->eq->hw;
	hw->hw_rq[rq->instance] = NULL;
	kfree(rq);
}

void
efct_hw_queue_teardown(struct efct_hw *hw)
{
	struct hw_eq *eq;
	struct hw_eq *eq_next;

	if (!hw->eq_list.next)
		return;

	list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
		efct_hw_del_eq(eq);
}

static inline int
efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
{
	return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
}

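/*
 * Claim the sequence that was posted at the given buffer index of an RQ
 * pair. The header queue lock also protects the companion data queue and
 * the rq_tracker slot, which is cleared once the sequence is taken.
 */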
static struct efc_hw_sequence *
efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
{
	struct sli4_queue *rq_hdr = &hw->rq[rqindex];
	struct efc_hw_sequence *seq = NULL;
	struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	unsigned long flags = 0;

	if (bufindex >= rq_hdr->length) {
		efc_log_err(hw->os,
			    "RQidx %d bufidx %d exceed ring len %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
		return NULL;
	}

	/* rq_hdr lock also covers rqindex+1 queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

	if (!seq) {
		efc_log_err(hw->os,
			    "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
			    rqindex, bufindex, rq_hdr->index);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return seq;
}

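/*
 * Handle an async RQ completion: decode the RQ id and buffer index from
 * the CQE, fetch the matching sequence, fill in the received header and
 * payload lengths plus the FCFI, and hand it to the unsolicited frame
 * handler. On RQ error status the buffer is returned to the chip (when it
 * was actually consumed) and -EIO is reported.
 */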
int
efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
			  u8 *cqe)
{
	u16 rq_id;
	u32 index;
	int rqindex;
	int rq_status;
	u32 h_len;
	u32 p_len;
	struct efc_hw_sequence *seq;
	struct hw_rq *rq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
					      &rq_id, &index);
	if (rq_status != 0) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = efct_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				efc_log_debug(hw->os,
					      "status=%#x: lookup fail id=%#x\n",
					      rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = efct_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (efct_hw_rqpair_sequence_free(hw, seq)) {
				efc_log_debug(hw->os,
					      "status=%#x,fail rtrn buf to RQ\n",
					      rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/*
			 * since RQ buffers were not consumed, cannot return
			 * them to chip
			 */
			efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
				      rq_status);
			fallthrough;
		default:
			break;
		}
		return -EIO;
	}

	rqindex = efct_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
			      rq_id);
		return -EIO;
	}

	rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	rq->use_count++;

	seq = efct_hw_rqpair_get(hw, rqindex, index);
	if (WARN_ON(!seq))
		return -EIO;

	seq->hw = hw;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	efct_unsolicited_cb(hw->os, seq);

	return 0;
}

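/*
 * Repost a sequence's header and payload buffers to their RQ pair and
 * record the sequence in rq_tracker at the index the hardware assigned;
 * both writes are expected to land at the same ring index since the pair
 * advances in lockstep.
 */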
static int
efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
	struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
	u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	struct hw_rq *rq = hw->hw_rq[hw_rq_index];
	u32 phys_hdr[2];
	u32 phys_payload[2];
	int qindex_hdr;
	int qindex_payload;
	unsigned long flags = 0;

	/* Update the RQ verification lookup tables */
	phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
	phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
	phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
	phys_payload[1] = lower_32_bits(seq->payload->dma.phys);

	/* rq_hdr lock also covers payload / header->rqindex+1 queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 * posting on the header queue posts the payload queue as well.
	 * We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = sli_rq_write(&hw->sli, rq_payload,
				      (void *)phys_payload);
	qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 ||
	    qindex_payload < 0) {
		efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		spin_unlock_irqrestore(&rq_hdr->lock, flags);
		return -EIO;
	}

	/* ensure the indexes are the same */
	WARN_ON(qindex_hdr != qindex_payload);

	/* Update the lookup table */
	if (!rq->rq_tracker[qindex_hdr]) {
		rq->rq_tracker[qindex_hdr] = seq;
	} else {
		efc_log_debug(hw->os,
			      "expected rq_tracker[%d][%d] buffer to be NULL\n",
			      hw_rq_index, qindex_hdr);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return 0;
}

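/*
 * Return a consumed sequence (header + payload buffer pair) to the chip
 * by reposting it through efct_hw_rqpair_put().
 */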
int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	int rc = 0;

	/*
	 * Post the data buffer first. Because in RQ pair mode, ringing the
	 * doorbell of the header ring will post the data buffer as well.
	 */
	if (efct_hw_rqpair_put(hw, seq)) {
		efc_log_err(hw->os, "error writing buffers\n");
		return -EIO;
	}

	return rc;
}

int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
{
	struct efct *efct = efc->base;

	return efct_hw_rqpair_sequence_free(&efct->hw, seq);
}