/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	struct ishtp_cl_rb *rb;
	int	ret = 0;
	unsigned long	flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return	0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return	ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	unsigned long	flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool of free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring	*tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return	0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	/* Only TX buffers were allocated here, so free the TX ring */
	ishtp_cl_free_tx_ring(cl);
	return	-ENOMEM;
}

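/*
 * Usage sketch (illustrative only): a client driver typically sizes and
 * allocates both rings after binding the client and before connecting.
 * The ring sizes and the my_cl_setup_rings() helper below are
 * hypothetical, not part of this API.
 *
 *	static int my_cl_setup_rings(struct ishtp_cl *cl)
 *	{
 *		int ret;
 *
 *		cl->rx_ring_size = 32;
 *		cl->tx_ring_size = 16;
 *
 *		ret = ishtp_cl_alloc_rx_ring(cl);
 *		if (ret)
 *			return ret;
 *
 *		ret = ishtp_cl_alloc_tx_ring(cl);
 *		if (ret)
 *			ishtp_cl_free_rx_ring(cl);
 *		return ret;
 *	}
 */
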
/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long	flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring	*tx_buf;
	unsigned long	flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}

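/*
 * Usage sketch (illustrative only): on disconnect or driver removal a
 * client releases both rings. The order of the two calls does not
 * matter; each free routine simply drains its own lists, so calling
 * them after a partially failed allocation is also safe.
 *
 *	ishtp_cl_free_rx_ring(cl);
 *	ishtp_cl_free_tx_ring(cl);
 */
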
/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on allocation failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success, -EINVAL if @rb is NULL, -ENOMEM on allocation
 * failure
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}

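/*
 * Usage sketch (illustrative only): ishtp_cl_alloc_rx_ring() above pairs
 * ishtp_io_rb_init() with ishtp_io_rb_alloc_buf() for every ring entry;
 * the same pattern adds a single buffer. The snippet is hypothetical and
 * assumes the caller holds free_list_spinlock around the list insertion.
 *
 *	rb = ishtp_io_rb_init(cl);
 *	if (!rb)
 *		return -ENOMEM;
 *	if (ishtp_io_rb_alloc_buf(rb, len)) {
 *		ishtp_io_rb_free(rb);
 *		return -ENOMEM;
 *	}
 *	list_add_tail(&rb->list, &cl->free_rb_list.list);
 */
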
/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success, -EFAULT if @rb or its client is NULL, otherwise
 * the error code returned by ishtp_cl_read_start()
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int	rets = 0;
	unsigned long	flags;

	if (!rb || !rb->cl)
		return	-EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If the client has no out flow-control credits, a receive buffer
	 * is available again, so restart the read to send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return	rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);

/**
 * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device tx buffer list and check whether it is empty
 *
 * Return: true if client tx buffer list is empty else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);

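/*
 * Usage sketch (illustrative only): a client that must not drop the link
 * while writes are still queued can poll this predicate before
 * disconnecting; the polling loop below is a hypothetical illustration,
 * not an API requirement.
 *
 *	while (!ishtp_cl_tx_empty(cl))
 *		usleep_range(1000, 2000);
 *	ishtp_cl_disconnect(cl);
 */
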
/**
 * ishtp_cl_rx_get_rb() - Get an rb from client device rx buffer list
 * @cl: Pointer to client device instance
 *
 * Check the client device in-process buffer list and remove the first rb
 * from it, if any.
 *
 * Return: rb pointer if buffer list isn't empty else NULL
 */
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
	unsigned long rx_flags;
	struct ishtp_cl_rb *rb;

	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
	rb = list_first_entry_or_null(&cl->in_process_list.list,
				struct ishtp_cl_rb, list);
	if (rb)
		list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

	return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
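
/*
 * Usage sketch (illustrative only): a client's RX path typically drains
 * the in-process list with ishtp_cl_rx_get_rb(), consumes each buffer
 * (buf_idx holds the number of valid bytes), then returns it through
 * ishtp_cl_io_rb_recycle() so flow control can resume. my_consume() is a
 * hypothetical consumer.
 *
 *	static void my_cl_process_rx(struct ishtp_cl *cl)
 *	{
 *		struct ishtp_cl_rb *rb;
 *
 *		while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
 *			my_consume(rb->buffer.data, rb->buf_idx);
 *			ishtp_cl_io_rb_recycle(rb);
 *		}
 *	}
 */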