1 /*
2  * Copyright (c) 2015-2016 Quantenna Communications, Inc.
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License
7  * as published by the Free Software Foundation; either version 2
8  * of the License, or (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16 
17 #include <linux/types.h>
18 #include <linux/io.h>
19 
20 #include "shm_ipc.h"
21 
22 #undef pr_fmt
23 #define pr_fmt(fmt)	"qtnfmac shm_ipc: %s: " fmt, __func__
24 
25 static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
26 {
27 	const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);
28 
29 	return (flags & QTNF_SHM_IPC_NEW_DATA);
30 }
31 
32 static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
33 {
34 	size_t size;
35 	bool rx_buff_ok = true;
36 	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
37 
38 	shm_reg_hdr = &ipc->shm_region->headroom.hdr;
39 
40 	size = readw(&shm_reg_hdr->data_len);
41 
42 	if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
43 		pr_err("wrong rx packet size: %zu\n", size);
44 		rx_buff_ok = false;
45 	}
46 
47 	if (likely(rx_buff_ok)) {
48 		ipc->rx_packet_count++;
49 		ipc->rx_callback.fn(ipc->rx_callback.arg,
50 				    ipc->shm_region->data, size);
51 	}
52 
53 	writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
54 	readl(&shm_reg_hdr->flags); /* flush PCIe write */
55 
56 	ipc->interrupt.fn(ipc->interrupt.arg);
57 }
58 
59 static void qtnf_shm_ipc_irq_work(struct work_struct *work)
60 {
61 	struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
62 						irq_work);
63 
64 	while (qtnf_shm_ipc_has_new_data(ipc))
65 		qtnf_shm_handle_new_data(ipc);
66 }
67 
68 static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
69 {
70 	u32 flags;
71 
72 	flags = readl(&ipc->shm_region->headroom.hdr.flags);
73 
74 	if (flags & QTNF_SHM_IPC_NEW_DATA)
75 		queue_work(ipc->workqueue, &ipc->irq_work);
76 }
77 
78 static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
79 {
80 	u32 flags;
81 
82 	if (!READ_ONCE(ipc->waiting_for_ack))
83 		return;
84 
85 	flags = readl(&ipc->shm_region->headroom.hdr.flags);
86 
87 	if (flags & QTNF_SHM_IPC_ACK) {
88 		WRITE_ONCE(ipc->waiting_for_ack, 0);
89 		complete(&ipc->tx_completion);
90 	}
91 }
92 
93 int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
94 		      enum qtnf_shm_ipc_direction direction,
95 		      struct qtnf_shm_ipc_region __iomem *shm_region,
96 		      struct workqueue_struct *workqueue,
97 		      const struct qtnf_shm_ipc_int *interrupt,
98 		      const struct qtnf_shm_ipc_rx_callback *rx_callback)
99 {
100 	BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
101 		     QTN_IPC_REG_HDR_SZ);
102 	BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);
103 
104 	ipc->shm_region = shm_region;
105 	ipc->direction = direction;
106 	ipc->interrupt = *interrupt;
107 	ipc->rx_callback = *rx_callback;
108 	ipc->tx_packet_count = 0;
109 	ipc->rx_packet_count = 0;
110 	ipc->workqueue = workqueue;
111 	ipc->waiting_for_ack = 0;
112 	ipc->tx_timeout_count = 0;
113 
114 	switch (direction) {
115 	case QTNF_SHM_IPC_OUTBOUND:
116 		ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
117 		break;
118 	case QTNF_SHM_IPC_INBOUND:
119 		ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
120 		break;
121 	default:
122 		return -EINVAL;
123 	}
124 
125 	INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
126 	init_completion(&ipc->tx_completion);
127 
128 	return 0;
129 }
130 
/* Tear down an IPC channel: wake any sender still blocked waiting for a
 * TX ACK so it can observe shutdown instead of sleeping until timeout.
 */
void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
{
	complete_all(&ipc->tx_completion);
}
135 
136 int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
137 {
138 	int ret = 0;
139 	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;
140 
141 	shm_reg_hdr = &ipc->shm_region->headroom.hdr;
142 
143 	if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
144 		return -E2BIG;
145 
146 	ipc->tx_packet_count++;
147 
148 	writew(size, &shm_reg_hdr->data_len);
149 	memcpy_toio(ipc->shm_region->data, buf, size);
150 
151 	/* sync previous writes before proceeding */
152 	dma_wmb();
153 
154 	WRITE_ONCE(ipc->waiting_for_ack, 1);
155 
156 	/* sync previous memory write before announcing new data ready */
157 	wmb();
158 
159 	writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
160 	readl(&shm_reg_hdr->flags); /* flush PCIe write */
161 
162 	ipc->interrupt.fn(ipc->interrupt.arg);
163 
164 	if (!wait_for_completion_timeout(&ipc->tx_completion,
165 					 QTN_SHM_IPC_ACK_TIMEOUT)) {
166 		ret = -ETIMEDOUT;
167 		ipc->tx_timeout_count++;
168 		pr_err("TX ACK timeout\n");
169 	}
170 
171 	/* now we're not waiting for ACK even in case of timeout */
172 	WRITE_ONCE(ipc->waiting_for_ack, 0);
173 
174 	return ret;
175 }
176