/*
 *  This file is part of the Chelsio T4 Ethernet driver for Linux.
 *  Copyright (C) 2003-2014 Chelsio Communications.  All rights reserved.
 *
 *  Written by Deepak (deepak.s@chelsio.com)
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 *  release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "cxgb4.h"
#include "clip_tbl.h"

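/* The CLIP table is a hash table with per-bucket lists.  IPv4 addresses hash
 * into the lower half of the buckets and IPv6 addresses into the upper half,
 * so the two address families never share a chain.
 */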
static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
{
	unsigned int clipt_size_half = c->clipt_size / 2;

	return jhash_1word(*key, 0) % clipt_size_half;
}

static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
{
	unsigned int clipt_size_half = d->clipt_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return clipt_size_half +
		(jhash_1word(xor, 0) % clipt_size_half);
}

static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
				   int addr_len)
{
	return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
				ipv6_clip_hash(ctbl, addr);
}

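/* Ask the firmware, via a FW_CLIP_CMD mailbox command, to allocate a CLIP
 * entry for the given local IPv6 address.
 */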
static int clip6_get_mbox(const struct net_device *dev,
			  const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

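/* Ask the firmware, via a FW_CLIP_CMD mailbox command, to free the CLIP
 * entry for the given local IPv6 address.
 */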
static int clip6_release_mbox(const struct net_device *dev,
			      const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

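/* Look up the given local IP in the CLIP table and take a reference on its
 * entry, allocating a new entry from the free list (and, for IPv6, installing
 * it in the firmware) if the address is not present yet.  Returns 0 on
 * success, -ENOMEM if the table is full, or the firmware mailbox error.
 */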
int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int addr_len;
	int ret = 0;

	if (!ctbl)
		return 0;

	if (v6)
		addr_len = 16;
	else
		addr_len = 4;

	hash = clip_addr_hash(ctbl, addr, addr_len);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (addr_len == cte->addr_len &&
		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	write_lock_bh(&ctbl->lock);
	if (!list_empty(&ctbl->ce_free_head)) {
		ce = list_first_entry(&ctbl->ce_free_head,
				      struct clip_entry, list);
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		spin_lock_init(&ce->lock);
		atomic_set(&ce->refcnt, 0);
		atomic_dec(&ctbl->nfree);
		ce->addr_len = addr_len;
		memcpy(ce->addr, lip, addr_len);
		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
		if (v6) {
			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
			if (ret) {
				write_unlock_bh(&ctbl->lock);
				return ret;
			}
		}
	} else {
		write_unlock_bh(&ctbl->lock);
		return -ENOMEM;
	}
	write_unlock_bh(&ctbl->lock);
found:
	atomic_inc(&ce->refcnt);

	return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);

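/* Drop a reference on the CLIP entry for the given local IP.  When the last
 * reference goes away the entry is returned to the free list and, for IPv6,
 * released in the firmware.
 */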
void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int addr_len;

	if (v6)
		addr_len = 16;
	else
		addr_len = 4;

	hash = clip_addr_hash(ctbl, addr, addr_len);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (addr_len == cte->addr_len &&
		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	return;
found:
	write_lock_bh(&ctbl->lock);
	spin_lock_bh(&ce->lock);
	if (atomic_dec_and_test(&ce->refcnt)) {
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		list_add_tail(&ce->list, &ctbl->ce_free_head);
		atomic_inc(&ctbl->nfree);
		if (v6)
			clip6_release_mbox(dev, (const struct in6_addr *)lip);
	}
	spin_unlock_bh(&ce->lock);
	write_unlock_bh(&ctbl->lock);
}
EXPORT_SYMBOL(cxgb4_clip_release);

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int cxgb4_update_dev_clip(struct net_device *root_dev,
				 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

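/* Install CLIP entries for the IPv6 addresses of the physical device itself
 * and of any bond master or VLAN device stacked on top of it.
 */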
int cxgb4_update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = cxgb4_update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(cxgb4_update_root_dev_clip);

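/* Dump the CLIP table through a seq_file: one line per entry with its IP
 * address and reference count, followed by the number of free entries.
 */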
int clip_tbl_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct clip_tbl *ctbl = adapter->clipt;
	struct clip_entry *ce;
	char ip[60];
	int i;

	read_lock_bh(&ctbl->lock);

	seq_puts(seq, "IP Address                  Users\n");
	for (i = 0; i < ctbl->clipt_size; ++i) {
		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
			ip[0] = '\0';
			if (ce->addr_len == 16)
				sprintf(ip, "%pI6c", ce->addr);
			else
				sprintf(ip, "%pI4", ce->addr);
			seq_printf(seq, "%-25s   %u\n", ip,
				   atomic_read(&ce->refcnt));
		}
	}
	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));

	read_unlock_bh(&ctbl->lock);

	return 0;
}

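/* Allocate and initialize a CLIP table covering hardware indices
 * [clipt_start, clipt_end].  All entries start out on the free list.
 */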
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
				  unsigned int clipt_end)
{
	struct clip_entry *cl_list;
	struct clip_tbl *ctbl;
	unsigned int clipt_size;
	int i;

	if (clipt_start >= clipt_end)
		return NULL;
	clipt_size = clipt_end - clipt_start + 1;
	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
		return NULL;

	ctbl = t4_alloc_mem(sizeof(*ctbl) +
			    clipt_size * sizeof(struct list_head));
	if (!ctbl)
		return NULL;

	ctbl->clipt_start = clipt_start;
	ctbl->clipt_size = clipt_size;
	INIT_LIST_HEAD(&ctbl->ce_free_head);

	atomic_set(&ctbl->nfree, clipt_size);
	rwlock_init(&ctbl->lock);

	for (i = 0; i < ctbl->clipt_size; ++i)
		INIT_LIST_HEAD(&ctbl->hash_list[i]);

	cl_list = t4_alloc_mem(clipt_size * sizeof(struct clip_entry));
	if (!cl_list) {
		t4_free_mem(ctbl);
		return NULL;
	}
	ctbl->cl_list = (void *)cl_list;

	for (i = 0; i < clipt_size; i++) {
		INIT_LIST_HEAD(&cl_list[i].list);
		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
	}

	return ctbl;
}

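/* Free the CLIP table and its entry array. */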
void t4_cleanup_clip_tbl(struct adapter *adap)
{
	struct clip_tbl *ctbl = adap->clipt;

	if (ctbl) {
		if (ctbl->cl_list)
			t4_free_mem(ctbl->cl_list);
		t4_free_mem(ctbl);
	}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);