/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	struct ishtp_cl_rb *rb;
	int	ret = 0;
	unsigned long	flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret) {
			/* rb is not on any list yet, free it here */
			ishtp_io_rb_free(rb);
			goto out;
		}
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return	0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return	ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	unsigned long	flags;

	/* Allocate a pool of free Tx buffers */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring	*tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto	out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto	out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return	0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return	-ENOMEM;
}
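
/*
 * Usage sketch (illustrative only, with hypothetical ring depths): a client
 * driver picks its ring depths before the rings are allocated; it is assumed
 * here that allocation happens as part of connection setup, e.g.:
 *
 *	cl->rx_ring_size = 32;
 *	cl->tx_ring_size = 16;
 *	rv = ishtp_cl_connect(cl);
 *	if (rv)
 *		return rv;
 *
 * Every RX slot holds up to fw_client->props.max_msg_length bytes, so deeper
 * rings trade memory for fewer flow-control stalls.
 */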

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long	flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring	*tx_buf;
	unsigned long	flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
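
/*
 * Teardown sketch (illustrative; it assumes the client has already stopped
 * submitting new reads and writes): both rings are released with the helpers
 * above, typically on disconnect or driver removal:
 *
 *	ishtp_cl_free_tx_ring(cl);
 *	ishtp_cl_free_rx_ring(cl);
 *
 * The free routines walk the free lists as well as the in-flight lists
 * (tx_list and in_process_list), so buffers still queued are reclaimed too.
 */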

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
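
/*
 * The two helpers above are intended to be used as a pair, mirroring the loop
 * in ishtp_cl_alloc_rx_ring().  A minimal sketch (len as used there):
 *
 *	rb = ishtp_io_rb_init(cl);
 *	if (!rb)
 *		return -ENOMEM;
 *	if (ishtp_io_rb_alloc_buf(rb, len)) {
 *		ishtp_io_rb_free(rb);
 *		return -ENOMEM;
 *	}
 */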

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int	rets = 0;
	unsigned long	flags;

	if (!rb || !rb->cl)
		return	-EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If the firmware currently holds no flow-control credit from us,
	 * start a new read, which grants one and lets it send again
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return	rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
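
/*
 * Recycling sketch (illustrative; the exact RX delivery path depends on the
 * client driver, and process_payload() below is a hypothetical consumer).
 * Once a received buffer has been consumed it must be handed back so flow
 * control can resume, e.g. while draining the in-process list:
 *
 *	spin_lock_irqsave(&cl->in_process_spinlock, flags);
 *	while (!list_empty(&cl->in_process_list.list)) {
 *		rb = list_first_entry(&cl->in_process_list.list,
 *				      struct ishtp_cl_rb, list);
 *		list_del_init(&rb->list);
 *		spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
 *
 *		process_payload(rb->buffer.data, rb->buf_idx);
 *		ishtp_cl_io_rb_recycle(rb);
 *
 *		spin_lock_irqsave(&cl->in_process_spinlock, flags);
 *	}
 *	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
 */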