xref: /openbmc/linux/drivers/crypto/cavium/cpt/cptvf_reqmanager.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
125763b3cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c694b233SGeorge Cherian /*
3c694b233SGeorge Cherian  * Copyright (C) 2016 Cavium, Inc.
4c694b233SGeorge Cherian  */
5c694b233SGeorge Cherian 
6c694b233SGeorge Cherian #include "cptvf.h"
7c4149431SHerbert Xu #include "cptvf_algs.h"
8c694b233SGeorge Cherian #include "request_manager.h"
9c694b233SGeorge Cherian 
10c694b233SGeorge Cherian /**
11c694b233SGeorge Cherian  * get_free_pending_entry - get free entry from pending queue
12*c4d7d318SLee Jones  * @q: pending queue
13*c4d7d318SLee Jones  * @qlen: queue length
14c694b233SGeorge Cherian  */
get_free_pending_entry(struct pending_queue * q,int qlen)15c694b233SGeorge Cherian static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
16c694b233SGeorge Cherian 						    int qlen)
17c694b233SGeorge Cherian {
18c694b233SGeorge Cherian 	struct pending_entry *ent = NULL;
19c694b233SGeorge Cherian 
20c694b233SGeorge Cherian 	ent = &q->head[q->rear];
21c694b233SGeorge Cherian 	if (unlikely(ent->busy)) {
22c694b233SGeorge Cherian 		ent = NULL;
23c694b233SGeorge Cherian 		goto no_free_entry;
24c694b233SGeorge Cherian 	}
25c694b233SGeorge Cherian 
26c694b233SGeorge Cherian 	q->rear++;
27c694b233SGeorge Cherian 	if (unlikely(q->rear == qlen))
28c694b233SGeorge Cherian 		q->rear = 0;
29c694b233SGeorge Cherian 
30c694b233SGeorge Cherian no_free_entry:
31c694b233SGeorge Cherian 	return ent;
32c694b233SGeorge Cherian }
33c694b233SGeorge Cherian 
pending_queue_inc_front(struct pending_qinfo * pqinfo,int qno)34c694b233SGeorge Cherian static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
35c694b233SGeorge Cherian 					   int qno)
36c694b233SGeorge Cherian {
37c694b233SGeorge Cherian 	struct pending_queue *queue = &pqinfo->queue[qno];
38c694b233SGeorge Cherian 
39c694b233SGeorge Cherian 	queue->front++;
40c694b233SGeorge Cherian 	if (unlikely(queue->front == pqinfo->qlen))
41c694b233SGeorge Cherian 		queue->front = 0;
42c694b233SGeorge Cherian }
43c694b233SGeorge Cherian 
/**
 * setup_sgio_components - DMA-map buffers and pack hardware SG components
 * @cptvf: CPT virtual function device
 * @list: array of buffer descriptors to map for DMA
 * @buf_count: number of entries in @list
 * @buffer: output area receiving packed struct sglist_component records
 *
 * Maps every buffer in @list bidirectionally, then writes the big-endian
 * length/address quadruples the CPT hardware expects: four buffers per
 * sglist_component, plus one partial trailing component when @buf_count
 * is not a multiple of four.
 *
 * Return: 0 on success, -EFAULT if @list is NULL, -EIO on DMA mapping
 * failure (all previously mapped entries are unmapped before returning).
 */
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
				 int buf_count, u8 *buffer)
{
	int ret = 0, i, j;
	int components;
	struct sglist_component *sg_ptr = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (likely(list[i].vptr)) {
			list[i].dma_addr = dma_map_single(&pdev->dev,
							  list[i].vptr,
							  list[i].size,
							  DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(&pdev->dev,
						       list[i].dma_addr))) {
				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
					i);
				ret = -EIO;
				goto sg_cleanup;
			}
		}
	}

	/* Pack four buffers per component, lengths/addresses big-endian. */
	components = buf_count / 4;
	sg_ptr = (struct sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}

	/* Fill the trailing partial component, if any (i == buf_count / 4). */
	components = buf_count % 4;

	switch (components) {
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		fallthrough;
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		fallthrough;
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}

	return ret;

sg_cleanup:
	/*
	 * Unmap every entry mapped before the failure at index i.
	 * Fix: the original unmapped list[i] (the entry that FAILED to
	 * map) on every iteration instead of list[j], leaking all prior
	 * mappings and unmapping a bogus address.
	 */
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}

	return ret;
}
120c694b233SGeorge Cherian 
setup_sgio_list(struct cpt_vf * cptvf,struct cpt_info_buffer * info,struct cpt_request_info * req)121c694b233SGeorge Cherian static inline int setup_sgio_list(struct cpt_vf *cptvf,
122c694b233SGeorge Cherian 				  struct cpt_info_buffer *info,
123c694b233SGeorge Cherian 				  struct cpt_request_info *req)
124c694b233SGeorge Cherian {
125c694b233SGeorge Cherian 	u16 g_sz_bytes = 0, s_sz_bytes = 0;
126c694b233SGeorge Cherian 	int ret = 0;
127c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
128c694b233SGeorge Cherian 
129c694b233SGeorge Cherian 	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
130c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
131c694b233SGeorge Cherian 		ret = -EINVAL;
132c694b233SGeorge Cherian 		goto  scatter_gather_clean;
133c694b233SGeorge Cherian 	}
134c694b233SGeorge Cherian 
135c694b233SGeorge Cherian 	/* Setup gather (input) components */
136c694b233SGeorge Cherian 	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
1379e27c991SMikulas Patocka 	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
138c694b233SGeorge Cherian 	if (!info->gather_components) {
139c694b233SGeorge Cherian 		ret = -ENOMEM;
140c694b233SGeorge Cherian 		goto  scatter_gather_clean;
141c694b233SGeorge Cherian 	}
142c694b233SGeorge Cherian 
143c694b233SGeorge Cherian 	ret = setup_sgio_components(cptvf, req->in,
144c694b233SGeorge Cherian 				    req->incnt,
145c694b233SGeorge Cherian 				    info->gather_components);
146c694b233SGeorge Cherian 	if (ret) {
147c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Failed to setup gather list\n");
148c694b233SGeorge Cherian 		ret = -EFAULT;
149c694b233SGeorge Cherian 		goto  scatter_gather_clean;
150c694b233SGeorge Cherian 	}
151c694b233SGeorge Cherian 
152c694b233SGeorge Cherian 	/* Setup scatter (output) components */
153c694b233SGeorge Cherian 	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
1549e27c991SMikulas Patocka 	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
155c694b233SGeorge Cherian 	if (!info->scatter_components) {
156c694b233SGeorge Cherian 		ret = -ENOMEM;
157c694b233SGeorge Cherian 		goto  scatter_gather_clean;
158c694b233SGeorge Cherian 	}
159c694b233SGeorge Cherian 
160c694b233SGeorge Cherian 	ret = setup_sgio_components(cptvf, req->out,
161c694b233SGeorge Cherian 				    req->outcnt,
162c694b233SGeorge Cherian 				    info->scatter_components);
163c694b233SGeorge Cherian 	if (ret) {
164c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Failed to setup gather list\n");
165c694b233SGeorge Cherian 		ret = -EFAULT;
166c694b233SGeorge Cherian 		goto  scatter_gather_clean;
167c694b233SGeorge Cherian 	}
168c694b233SGeorge Cherian 
169c694b233SGeorge Cherian 	/* Create and initialize DPTR */
170c694b233SGeorge Cherian 	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
1719e27c991SMikulas Patocka 	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
172c694b233SGeorge Cherian 	if (!info->in_buffer) {
173c694b233SGeorge Cherian 		ret = -ENOMEM;
174c694b233SGeorge Cherian 		goto  scatter_gather_clean;
175c694b233SGeorge Cherian 	}
176c694b233SGeorge Cherian 
177c4149431SHerbert Xu 	((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
178c4149431SHerbert Xu 	((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
179c4149431SHerbert Xu 	((__be16 *)info->in_buffer)[2] = 0;
180c4149431SHerbert Xu 	((__be16 *)info->in_buffer)[3] = 0;
181c694b233SGeorge Cherian 
182c694b233SGeorge Cherian 	memcpy(&info->in_buffer[8], info->gather_components,
183c694b233SGeorge Cherian 	       g_sz_bytes);
184c694b233SGeorge Cherian 	memcpy(&info->in_buffer[8 + g_sz_bytes],
185c694b233SGeorge Cherian 	       info->scatter_components, s_sz_bytes);
186c694b233SGeorge Cherian 
187c694b233SGeorge Cherian 	info->dptr_baddr = dma_map_single(&pdev->dev,
188c694b233SGeorge Cherian 					  (void *)info->in_buffer,
189c694b233SGeorge Cherian 					  info->dlen,
190c694b233SGeorge Cherian 					  DMA_BIDIRECTIONAL);
191c694b233SGeorge Cherian 	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
192c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
193c694b233SGeorge Cherian 		ret = -EIO;
194c694b233SGeorge Cherian 		goto  scatter_gather_clean;
195c694b233SGeorge Cherian 	}
196c694b233SGeorge Cherian 
197c694b233SGeorge Cherian 	/* Create and initialize RPTR */
1989e27c991SMikulas Patocka 	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
199c694b233SGeorge Cherian 	if (!info->out_buffer) {
200c694b233SGeorge Cherian 		ret = -ENOMEM;
201c694b233SGeorge Cherian 		goto scatter_gather_clean;
202c694b233SGeorge Cherian 	}
203c694b233SGeorge Cherian 
204c694b233SGeorge Cherian 	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
205c694b233SGeorge Cherian 	info->alternate_caddr = (u64 *)info->out_buffer;
206c694b233SGeorge Cherian 	info->rptr_baddr = dma_map_single(&pdev->dev,
207c694b233SGeorge Cherian 					  (void *)info->out_buffer,
208c694b233SGeorge Cherian 					  COMPLETION_CODE_SIZE,
209c694b233SGeorge Cherian 					  DMA_BIDIRECTIONAL);
210c694b233SGeorge Cherian 	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
211c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
212c694b233SGeorge Cherian 			COMPLETION_CODE_SIZE);
213c694b233SGeorge Cherian 		ret = -EIO;
214c694b233SGeorge Cherian 		goto  scatter_gather_clean;
215c694b233SGeorge Cherian 	}
216c694b233SGeorge Cherian 
217c694b233SGeorge Cherian 	return 0;
218c694b233SGeorge Cherian 
219c694b233SGeorge Cherian scatter_gather_clean:
220c694b233SGeorge Cherian 	return ret;
221c694b233SGeorge Cherian }
222c694b233SGeorge Cherian 
/*
 * send_cpt_command - copy a CPT instruction into VQ @qno and ring the
 * doorbell.
 * @cptvf: CPT virtual function device
 * @cmd: instruction to enqueue (cmd_size bytes are copied)
 * @qno: command queue index; must be < cptvf->nr_queues
 *
 * The queue is a chain of chunks; when the current chunk fills
 * (size / 64 slots — presumably 64-byte instruction slots, TODO
 * confirm), we advance qhead to the next chunk in the hlist and reset
 * the slot index.  Runs under the per-queue spinlock.
 *
 * Return: 0 on success, -EINVAL for an out-of-range @qno.
 */
static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
		     u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct command_qinfo *qinfo = NULL;
	struct command_queue *queue;
	struct command_chunk *chunk;
	u8 *ent;
	int ret = 0;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];
	/* lock command queue */
	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		/* Current chunk full: move qhead to another chunk in the
		 * list (the first one that isn't the current chunk).
		 */
		hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
			if (chunk == queue->qhead) {
				continue;
			} else {
				queue->qhead = chunk;
				break;
			}
		}
		queue->idx = 0;
	}
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	cptvf_write_vq_doorbell(cptvf, 1);
	/* unlock command queue */
	spin_unlock(&queue->lock);

	return ret;
}
265c694b233SGeorge Cherian 
do_request_cleanup(struct cpt_vf * cptvf,struct cpt_info_buffer * info)266cd1af982SYueHaibing static void do_request_cleanup(struct cpt_vf *cptvf,
267c694b233SGeorge Cherian 			struct cpt_info_buffer *info)
268c694b233SGeorge Cherian {
269c694b233SGeorge Cherian 	int i;
270c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
271c694b233SGeorge Cherian 	struct cpt_request_info *req;
272c694b233SGeorge Cherian 
273c694b233SGeorge Cherian 	if (info->dptr_baddr)
274c694b233SGeorge Cherian 		dma_unmap_single(&pdev->dev, info->dptr_baddr,
275c694b233SGeorge Cherian 				 info->dlen, DMA_BIDIRECTIONAL);
276c694b233SGeorge Cherian 
277c694b233SGeorge Cherian 	if (info->rptr_baddr)
278c694b233SGeorge Cherian 		dma_unmap_single(&pdev->dev, info->rptr_baddr,
279c694b233SGeorge Cherian 				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);
280c694b233SGeorge Cherian 
281c694b233SGeorge Cherian 	if (info->comp_baddr)
282c694b233SGeorge Cherian 		dma_unmap_single(&pdev->dev, info->comp_baddr,
283c694b233SGeorge Cherian 				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);
284c694b233SGeorge Cherian 
285c694b233SGeorge Cherian 	if (info->req) {
286c694b233SGeorge Cherian 		req = info->req;
287c694b233SGeorge Cherian 		for (i = 0; i < req->outcnt; i++) {
288c694b233SGeorge Cherian 			if (req->out[i].dma_addr)
289c694b233SGeorge Cherian 				dma_unmap_single(&pdev->dev,
290c694b233SGeorge Cherian 						 req->out[i].dma_addr,
291c694b233SGeorge Cherian 						 req->out[i].size,
292c694b233SGeorge Cherian 						 DMA_BIDIRECTIONAL);
293c694b233SGeorge Cherian 		}
294c694b233SGeorge Cherian 
295c694b233SGeorge Cherian 		for (i = 0; i < req->incnt; i++) {
296c694b233SGeorge Cherian 			if (req->in[i].dma_addr)
297c694b233SGeorge Cherian 				dma_unmap_single(&pdev->dev,
298c694b233SGeorge Cherian 						 req->in[i].dma_addr,
299c694b233SGeorge Cherian 						 req->in[i].size,
300c694b233SGeorge Cherian 						 DMA_BIDIRECTIONAL);
301c694b233SGeorge Cherian 		}
302c694b233SGeorge Cherian 	}
303c694b233SGeorge Cherian 
304453431a5SWaiman Long 	kfree_sensitive(info->scatter_components);
305453431a5SWaiman Long 	kfree_sensitive(info->gather_components);
306453431a5SWaiman Long 	kfree_sensitive(info->out_buffer);
307453431a5SWaiman Long 	kfree_sensitive(info->in_buffer);
308453431a5SWaiman Long 	kfree_sensitive((void *)info->completion_addr);
309453431a5SWaiman Long 	kfree_sensitive(info);
310c694b233SGeorge Cherian }
311c694b233SGeorge Cherian 
do_post_process(struct cpt_vf * cptvf,struct cpt_info_buffer * info)312cd1af982SYueHaibing static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
313c694b233SGeorge Cherian {
314c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
315c694b233SGeorge Cherian 
3169bd82904SGeorge Cherian 	if (!info) {
3179bd82904SGeorge Cherian 		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
318c694b233SGeorge Cherian 		return;
319c694b233SGeorge Cherian 	}
320c694b233SGeorge Cherian 
321c694b233SGeorge Cherian 	do_request_cleanup(cptvf, info);
322c694b233SGeorge Cherian }
323c694b233SGeorge Cherian 
/*
 * process_pending_queue - reap completed/failed/timed-out entries from
 * pending queue @qno, front to rear.
 * @cptvf: CPT virtual function device
 * @pqinfo: pending queue bookkeeping (queues + qlen)
 * @qno: queue index to drain
 *
 * Iterates from the queue's front while entries are busy.  For each
 * entry: hardware fault/software error -> log, release the entry and
 * its resources, stop.  Still COMPLETION_CODE_INIT (not yet written
 * back by HW) -> stop on timeout (after cleanup) or grant extra time
 * via the alternate completion address, otherwise fall through and
 * treat the entry as serviced: release it, clean up, and invoke the
 * caller's callback with the completion code.
 *
 * The queue lock is taken per iteration and always dropped before
 * do_post_process()/callback run, so callbacks may resubmit.
 */
static inline void process_pending_queue(struct cpt_vf *cptvf,
					 struct pending_qinfo *pqinfo,
					 int qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	struct pending_entry *pentry = NULL;
	struct cpt_info_buffer *info = NULL;
	union cpt_res_s *status = NULL;
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		/* Front entry not busy: queue drained. */
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			/* Corrupt entry: skip it and keep draining. */
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			pending_queue_inc_front(pqinfo, qno);
			spin_unlock_bh(&pqueue->lock);
			continue;
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			/* Hardware reported failure: drop the entry and
			 * its resources.  NOTE(review): the user callback
			 * is not invoked on this path.
			 */
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA Fault" : "Software error");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec((&pqueue->pending_count));
			pentry->post_arg = NULL;
			pending_queue_inc_front(pqinfo, qno);
			do_request_cleanup(cptvf, info);
			spin_unlock_bh(&pqueue->lock);
			break;
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			/* check for timeout */
			if (time_after_eq(jiffies,
					  (info->time_in +
					  (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec((&pqueue->pending_count));
				pentry->post_arg = NULL;
				pending_queue_inc_front(pqinfo, qno);
				do_request_cleanup(cptvf, info);
				spin_unlock_bh(&pqueue->lock);
				break;
			} else if ((*info->alternate_caddr ==
				(~COMPLETION_CODE_INIT)) &&
				(info->extra_time < TIME_IN_RESET_COUNT)) {
				/* RPTR still holds its preset value: HW has
				 * not written back yet; restart the timeout
				 * window up to TIME_IN_RESET_COUNT times.
				 */
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				break;
			}
		}

		/* Request serviced: release the entry before calling back. */
		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec((&pqueue->pending_count));
		pending_queue_inc_front(pqinfo, qno);
		spin_unlock_bh(&pqueue->lock);

		do_post_process(info->cptvf, info);
		/*
		 * Calling callback after we find
		 * that the request has been serviced
		 */
		pentry->callback(ccode, pentry->callback_arg);
	}
}
405c694b233SGeorge Cherian 
/*
 * process_request - build and submit one CPT request on queue 0.
 * @cptvf: CPT virtual function device
 * @req: caller's request (buffers, opcode/params, callback)
 *
 * Allocates the per-request cpt_info_buffer, builds the DPTR/RPTR
 * scatter-gather lists, maps a completion-status buffer, fills the
 * CPT_INST_S instruction, reserves a pending-queue entry and sends the
 * command.  If the pending queue is above PENDING_THOLD or has no free
 * entry, it is drained once via process_pending_queue() before giving
 * up.  Allocation context follows req->may_sleep.
 *
 * Return: 0 on success, negative errno on failure (all partial
 * allocations are released via do_request_cleanup()).
 */
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
	int ret = 0, clear = 0, queue = 0;
	struct cpt_info_buffer *info = NULL;
	struct cptvf_request *cpt_req = NULL;
	union ctrl_info *ctrl = NULL;
	union cpt_res_s *result = NULL;
	struct pending_entry *pentry = NULL;
	struct pending_queue *pqueue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	u8 group = 0;
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (ret) {
		dev_err(&pdev->dev, "Setting up SG list failed");
		goto request_cleanup;
	}

	cpt_req->dlen = info->dlen;
	/*
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		goto request_cleanup;
	}

	/* Preset compcode so completion polling can tell when HW wrote it. */
	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					       (void *)info->completion_addr,
					       sizeof(union cpt_res_s),
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		goto  request_cleanup;
	}

	/* Fill the VQ command */
	vq_cmd.cmd.u64 = 0;
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen   = cpu_to_be16(cpt_req->dlen);

	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	vq_cmd.cptr.u64 = 0;
	vq_cmd.cptr.s.grp = group;
	/* Get Pending Entry to submit command */
	/* Always queue 0, because 1 queue per VF */
	queue = 0;
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

get_pending_entry:
	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		/* Queue full: drain completed entries once, then retry. */
		if (clear == 0) {
			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
			clear = 1;
			goto get_pending_entry;
		}
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		goto request_cleanup;
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);

	/* Send CPT command */
	/* NOTE(review): info->pentry was already assigned above; the
	 * duplicate store below is harmless.
	 */
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;

	/* Create the CPT_INST_S type command for HW interpretation */
	cptinst.s.doneint = true;
	cptinst.s.res_addr = (u64)info->comp_baddr;
	cptinst.s.tag = 0;
	cptinst.s.grp = 0;
	cptinst.s.wq_ptr = 0;
	cptinst.s.ei0 = vq_cmd.cmd.u64;
	cptinst.s.ei1 = vq_cmd.dptr;
	cptinst.s.ei2 = vq_cmd.rptr;
	cptinst.s.ei3 = vq_cmd.cptr.u64;

	/* Pending-queue lock is held across the send so the entry and the
	 * command queue stay consistent.
	 */
	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		goto request_cleanup;
	}

	return 0;

request_cleanup:
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
	do_request_cleanup(cptvf, info);

	return ret;
}
540c694b233SGeorge Cherian 
/**
 * vq_post_process - drain completed entries from pending queue @qno
 * @cptvf: CPT virtual function device
 * @qno: pending queue index to process
 *
 * Valid queue indices are 0..nr_queues-1, so reject qno >= nr_queues.
 * Fix: the original used '>' — inconsistent with send_cpt_command()'s
 * bound check and letting qno == nr_queues index one past the end of
 * the queue array.
 */
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}
553c694b233SGeorge Cherian 
cptvf_do_request(void * vfdev,struct cpt_request_info * req)554c694b233SGeorge Cherian int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
555c694b233SGeorge Cherian {
556c694b233SGeorge Cherian 	struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
557c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
558c694b233SGeorge Cherian 
559c694b233SGeorge Cherian 	if (!cpt_device_ready(cptvf)) {
560c694b233SGeorge Cherian 		dev_err(&pdev->dev, "CPT Device is not ready");
561c694b233SGeorge Cherian 		return -ENODEV;
562c694b233SGeorge Cherian 	}
563c694b233SGeorge Cherian 
564c694b233SGeorge Cherian 	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
565c694b233SGeorge Cherian 		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
566c694b233SGeorge Cherian 			cptvf->vfid);
567c694b233SGeorge Cherian 		return -EINVAL;
568c694b233SGeorge Cherian 	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
569c694b233SGeorge Cherian 		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
570c694b233SGeorge Cherian 			cptvf->vfid);
571c694b233SGeorge Cherian 		return -EINVAL;
572c694b233SGeorge Cherian 	}
573c694b233SGeorge Cherian 
574c694b233SGeorge Cherian 	return process_request(cptvf, req);
575c694b233SGeorge Cherian }
576