xref: /openbmc/linux/drivers/scsi/fnic/vnic_wq_copy.h (revision a701d28e)
1 /*
2  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  *
5  * This program is free software; you may redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; version 2 of the License.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16  * SOFTWARE.
17  */
18 #ifndef _VNIC_WQ_COPY_H_
19 #define _VNIC_WQ_COPY_H_
20 
21 #include <linux/pci.h>
22 #include "vnic_wq.h"
23 #include "fcpio.h"
24 
25 #define	VNIC_WQ_COPY_MAX 1
26 
/* Copy ("host request") work queue: a ring of fcpio_host_req descriptors
 * handed to the adapter, with separate producer and consumer cursors.
 */
struct vnic_wq_copy {
	unsigned int index;			/* queue number within the device */
	struct vnic_dev *vdev;			/* owning vNIC device */
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;		/* descriptor ring storage/accounting */
	unsigned to_use_index;			/* producer: next slot to post */
	unsigned to_clean_index;		/* consumer: next slot to complete */
};
35 
36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
37 {
38 	return wq->ring.desc_avail;
39 }
40 
41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
42 {
43 	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
44 }
45 
46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
47 {
48 	struct fcpio_host_req *desc = wq->ring.descs;
49 	return &desc[wq->to_use_index];
50 }
51 
52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
53 {
54 
55 	((wq->to_use_index + 1) == wq->ring.desc_count) ?
56 		(wq->to_use_index = 0) : (wq->to_use_index++);
57 	wq->ring.desc_avail--;
58 
59 	/* Adding write memory barrier prevents compiler and/or CPU
60 	 * reordering, thus avoiding descriptor posting before
61 	 * descriptor is initialized. Otherwise, hardware can read
62 	 * stale descriptor fields.
63 	 */
64 	wmb();
65 
66 	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
67 }
68 
69 static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
70 {
71 	unsigned int cnt;
72 
73 	if (wq->to_clean_index <= index)
74 		cnt = (index - wq->to_clean_index) + 1;
75 	else
76 		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
77 
78 	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
79 	wq->ring.desc_avail += cnt;
80 
81 }
82 
83 static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
84 	u16 completed_index,
85 	void (*q_service)(struct vnic_wq_copy *wq,
86 	struct fcpio_host_req *wq_desc))
87 {
88 	struct fcpio_host_req *wq_desc = wq->ring.descs;
89 	unsigned int curr_index;
90 
91 	while (1) {
92 
93 		if (q_service)
94 			(*q_service)(wq, &wq_desc[wq->to_clean_index]);
95 
96 		wq->ring.desc_avail++;
97 
98 		curr_index = wq->to_clean_index;
99 
100 		/* increment the to-clean index so that we start
101 		 * with an unprocessed index next time we enter the loop
102 		 */
103 		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
104 			(wq->to_clean_index = 0) : (wq->to_clean_index++);
105 
106 		if (curr_index == completed_index)
107 			break;
108 
109 		/* we have cleaned all the entries */
110 		if ((completed_index == (u16)-1) &&
111 		    (wq->to_clean_index == wq->to_use_index))
112 			break;
113 	}
114 }
115 
/* Queue lifecycle entry points — implemented outside this header.
 * NOTE(review): descriptions below are inferred from the names/signatures;
 * confirm against the defining .c file.
 */
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
/* Allocate ring storage for @desc_count descriptors of @desc_size bytes. */
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
/* Presumably drains the queue, calling @q_clean per outstanding descriptor. */
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc));
127 
128 #endif /* _VNIC_WQ_COPY_H_ */
129