/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
 *
 * Written by Deepak (deepak.s@chelsio.com)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "cxgb4.h"
#include "clip_tbl.h"

/* Hash an IPv4 address into the lower half of the CLIP table. */
static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
{
	unsigned int clipt_size_half = c->clipt_size / 2;

	return jhash_1word(*key, 0) % clipt_size_half;
}

/* Hash an IPv6 address into the upper half of the CLIP table. */
static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
{
	unsigned int clipt_size_half = d->clipt_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return clipt_size_half +
		(jhash_1word(xor, 0) % clipt_size_half);
}

static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
				   u8 v6)
{
	return v6 ? ipv6_clip_hash(ctbl, addr) :
			ipv4_clip_hash(ctbl, addr);
}

/* Ask the firmware to add an IPv6 address to the CLIP region. */
static int clip6_get_mbox(const struct net_device *dev,
			  const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

/* Ask the firmware to release an IPv6 address from the CLIP region. */
static int clip6_release_mbox(const struct net_device *dev,
			      const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

/* Look up @lip in the CLIP table and take a reference on its entry,
 * allocating a new entry (and, for IPv6, notifying the firmware) if the
 * address is not present yet.  Returns 0 on success or a negative errno.
 */
int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int ret = -1;

	if (!ctbl)
		return 0;

	hash = clip_addr_hash(ctbl, addr, v6);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (cte->addr6.sin6_family == AF_INET6 && v6)
			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
				     sizeof(struct in6_addr));
		else if (cte->addr.sin_family == AF_INET && !v6)
			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
				     sizeof(struct in_addr));
		if (!ret) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			refcount_inc(&ce->refcnt);
			return 0;
		}
	}
	read_unlock_bh(&ctbl->lock);

	write_lock_bh(&ctbl->lock);
	if (!list_empty(&ctbl->ce_free_head)) {
		ce = list_first_entry(&ctbl->ce_free_head,
				      struct clip_entry, list);
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		spin_lock_init(&ce->lock);
		refcount_set(&ce->refcnt, 0);
		atomic_dec(&ctbl->nfree);
		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
		if (v6) {
			ce->addr6.sin6_family = AF_INET6;
			memcpy(ce->addr6.sin6_addr.s6_addr,
			       lip, sizeof(struct in6_addr));
			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
			if (ret) {
				write_unlock_bh(&ctbl->lock);
				dev_err(adap->pdev_dev,
					"CLIP FW cmd failed with error %d, "
					"Connections using %pI6c won't be "
					"offloaded",
					ret, ce->addr6.sin6_addr.s6_addr);
				return ret;
			}
		} else {
			ce->addr.sin_family = AF_INET;
			memcpy((char *)(&ce->addr.sin_addr), lip,
			       sizeof(struct in_addr));
		}
	} else {
		write_unlock_bh(&ctbl->lock);
		dev_info(adap->pdev_dev, "CLIP table overflow, "
			 "Connections using %pI6c won't be offloaded",
			 (void *)lip);
		return -ENOMEM;
	}
	write_unlock_bh(&ctbl->lock);
	refcount_set(&ce->refcnt, 1);
	return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);

/* Drop a reference to the CLIP entry for @lip.  When the last reference
 * goes away, the entry is moved back to the free list and, for IPv6, the
 * firmware mapping is released.
 */
void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int ret = -1;

	if (!ctbl)
		return;

	hash = clip_addr_hash(ctbl, addr, v6);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (cte->addr6.sin6_family == AF_INET6 && v6)
			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
				     sizeof(struct in6_addr));
		else if (cte->addr.sin_family == AF_INET && !v6)
			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
				     sizeof(struct in_addr));
		if (!ret) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	return;
found:
	write_lock_bh(&ctbl->lock);
	spin_lock_bh(&ce->lock);
	if (refcount_dec_and_test(&ce->refcnt)) {
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		list_add_tail(&ce->list, &ctbl->ce_free_head);
		atomic_inc(&ctbl->nfree);
		if (v6)
			clip6_release_mbox(dev, (const struct in6_addr *)lip);
	}
	spin_unlock_bh(&ce->lock);
	write_unlock_bh(&ctbl->lock);
}
EXPORT_SYMBOL(cxgb4_clip_release);

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int cxgb4_update_dev_clip(struct net_device *root_dev,
				 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

/* Walk the physical device and every bond/vlan device stacked on top of it
 * and install all of their IPv6 addresses in the CLIP table.
 */
int cxgb4_update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = cxgb4_update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(cxgb4_update_root_dev_clip);

/* Dump the CLIP table (one line per in-use entry) for debugfs. */
int clip_tbl_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct clip_tbl *ctbl = adapter->clipt;
	struct clip_entry *ce;
	char ip[60];
	int i;

	read_lock_bh(&ctbl->lock);

	seq_puts(seq, "IP Address                Users\n");
	for (i = 0; i < ctbl->clipt_size; ++i) {
		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
			ip[0] = '\0';
			sprintf(ip, "%pISc", &ce->addr);
			seq_printf(seq, "%-25s %u\n", ip,
				   refcount_read(&ce->refcnt));
		}
	}
	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));

	read_unlock_bh(&ctbl->lock);

	return 0;
}

/* Allocate and initialize the CLIP table state covering hardware indices
 * [clipt_start, clipt_end].  Returns NULL if the range is invalid or an
 * allocation fails.
 */
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
				  unsigned int clipt_end)
{
	struct clip_entry *cl_list;
	struct clip_tbl *ctbl;
	unsigned int clipt_size;
	int i;

	if (clipt_start >= clipt_end)
		return NULL;
	clipt_size = clipt_end - clipt_start + 1;
	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
		return NULL;

	ctbl = kvzalloc(sizeof(*ctbl) +
			clipt_size * sizeof(struct list_head), GFP_KERNEL);
	if (!ctbl)
		return NULL;

	ctbl->clipt_start = clipt_start;
	ctbl->clipt_size = clipt_size;
	INIT_LIST_HEAD(&ctbl->ce_free_head);

	atomic_set(&ctbl->nfree, clipt_size);
	rwlock_init(&ctbl->lock);

	for (i = 0; i < ctbl->clipt_size; ++i)
		INIT_LIST_HEAD(&ctbl->hash_list[i]);

	cl_list = kvzalloc(clipt_size * sizeof(struct clip_entry), GFP_KERNEL);
	if (!cl_list) {
		kvfree(ctbl);
		return NULL;
	}
	ctbl->cl_list = (void *)cl_list;

	for (i = 0; i < clipt_size; i++) {
		INIT_LIST_HEAD(&cl_list[i].list);
		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
	}

	return ctbl;
}

/* Free the CLIP table state allocated by t4_init_clip_tbl(). */
void t4_cleanup_clip_tbl(struct adapter *adap)
{
	struct clip_tbl *ctbl = adap->clipt;

	if (ctbl) {
		if (ctbl->cl_list)
			kvfree(ctbl->cl_list);
		kvfree(ctbl);
	}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);
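
/*
 * Usage sketch (illustrative only, not part of the driver): an offload path
 * built on this table would typically claim the connection's local IPv6
 * address before programming the hardware tuple and drop the reference on
 * teardown.  The names my_offload_connect(), my_offload_close() and
 * struct my_conn below are hypothetical placeholders.
 *
 *	static int my_offload_connect(struct net_device *dev,
 *				      struct my_conn *conn)
 *	{
 *		int ret;
 *
 *		// Take (or create) a refcounted CLIP entry for the local
 *		// IPv6 address so the hardware can match on it.
 *		ret = cxgb4_clip_get(dev,
 *				     (const u32 *)conn->laddr6.s6_addr, 1);
 *		if (ret)
 *			return ret;	// table full or FW command failed
 *
 *		// ... program the connection into hardware ...
 *		return 0;
 *	}
 *
 *	static void my_offload_close(struct net_device *dev,
 *				     struct my_conn *conn)
 *	{
 *		// Dropping the last reference frees the entry and tells
 *		// the firmware to release the address.
 *		cxgb4_clip_release(dev,
 *				   (const u32 *)conn->laddr6.s6_addr, 1);
 *	}
 */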