/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cxgb4.h"
#include "smt.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

struct smt_data *t4_init_smt(void)
{
	unsigned int smt_size;
	struct smt_data *s;
	int i;

	smt_size = SMT_SIZE;

	s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL);
	if (!s)
		return NULL;
	s->smt_size = smt_size;
	rwlock_init(&s->lock);
	for (i = 0; i < s->smt_size; ++i) {
		s->smtab[i].idx = i;
		s->smtab[i].state = SMT_STATE_UNUSED;
		memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
		spin_lock_init(&s->smtab[i].lock);
		s->smtab[i].refcnt = 0;
	}
	return s;
}

static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
	struct smt_entry *first_free = NULL;
	struct smt_entry *e, *end;

	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
		if (e->refcnt == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == SMT_STATE_SWITCHING) {
				/* This entry is actually in use. See if we can
				 * re-use it ?
				 */
				if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
					goto found_reuse;
			}
		}
	}

	if (first_free) {
		e = first_free;
		goto found;
	}
	return NULL;

found:
	e->state = SMT_STATE_UNUSED;

found_reuse:
	return e;
}

static void t4_smte_free(struct smt_entry *e)
{
	if (e->refcnt == 0) { /* hasn't been recycled */
		e->state = SMT_STATE_UNUSED;
	}
}

/**
 * cxgb4_smt_release - Release SMT entry
 * @e: smt entry to release
 *
 * Releases ref count and frees up an smt entry from SMT table
 */
void cxgb4_smt_release(struct smt_entry *e)
{
	spin_lock_bh(&e->lock);
	if ((--e->refcnt) == 0)
		t4_smte_free(e);
	spin_unlock_bh(&e->lock);
}
EXPORT_SYMBOL(cxgb4_smt_release);

void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
{
	unsigned int smtidx = TID_TID_G(GET_TID(rpl));
	struct smt_data *s = adap->smt;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		struct smt_entry *e = &s->smtab[smtidx];

		dev_err(adap->pdev_dev,
			"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
			rpl->status, smtidx);
		spin_lock(&e->lock);
		e->state = SMT_STATE_ERROR;
		spin_unlock(&e->lock);
		return;
	}
}

static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
{
	struct cpl_t6_smt_write_req *t6req;
	struct smt_data *s = adapter->smt;
	struct cpl_smt_write_req *req;
	struct sk_buff *skb;
	int size;
	u8 row;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		size = sizeof(*req);
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
		/* Source MAC Table (SMT) contains 256 SMAC entries
		 * organized in 128 rows of 2 entries each.
		 */
		req = (struct cpl_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(req, 0);

		/* Each row contains an SMAC pair.
		 * LSB selects the SMAC entry within a row
		 */
		row = (e->idx >> 1);
		if (e->idx & 1) {
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, e->src_mac, ETH_ALEN);

			/* fill pfvf0/src_mac0 with entry
			 * at prev index from smt-tab.
			 */
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
			       ETH_ALEN);
		} else {
			req->pfvf0 = 0x0;
			memcpy(req->src_mac0, e->src_mac, ETH_ALEN);

			/* fill pfvf1/src_mac1 with entry
			 * at next index from smt-tab
			 */
			req->pfvf1 = 0x0;
			memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
			       ETH_ALEN);
		}
	} else {
		size = sizeof(*t6req);
		skb = alloc_skb(size, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
		/* Source MAC Table (SMT) contains 256 SMAC entries */
		t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
		INIT_TP_WR(t6req, 0);
		req = (struct cpl_smt_write_req *)t6req;

		/* fill pfvf0/src_mac0 from smt-tab */
		req->pfvf0 = 0x0;
		memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
		row = e->idx;
	}

	OPCODE_TID(req) =
		htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
				    TID_QID_V(adapter->sge.fw_evtq.abs_id)));
	req->params = htonl(SMTW_NORPL_V(0) |
			    SMTW_IDX_V(row) |
			    SMTW_OVLAN_IDX_V(0));
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
						u8 *smac)
{
	struct smt_data *s = adap->smt;
	struct smt_entry *e;

	write_lock_bh(&s->lock);
	e = find_or_alloc_smte(s, smac);
	if (e) {
		spin_lock(&e->lock);
		if (!e->refcnt) {
			e->refcnt = 1;
			e->state = SMT_STATE_SWITCHING;
			e->pfvf = pfvf;
			memcpy(e->src_mac, smac, ETH_ALEN);
			write_smt_entry(adap, e);
		} else {
			++e->refcnt;
		}
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&s->lock);
	return e;
}

/**
 * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
 * @dev: net_device pointer
 * @smac: MAC address to add to SMT
 * Returns pointer to the SMT entry created
 *
 * Allocates an SMT entry to be used by switching rule of a filter.
 */
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_smt_alloc_switching(adap, 0x0, smac);
}
EXPORT_SYMBOL(cxgb4_smt_alloc_switching);
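
/* Usage sketch (illustrative only, not taken from this driver): a caller
 * installing a switching filter would typically pair the two exported
 * helpers above, taking a reference on an SMT entry for the rewrite source
 * MAC when the filter is set up and dropping it when the filter is removed.
 * The function name, net_device and MAC value below are hypothetical, so
 * the sketch is kept under "#if 0" and is never compiled.
 */
#if 0
static int example_install_switching_filter(struct net_device *dev)
{
	/* Hypothetical source MAC to be written into the SMT */
	u8 smac[ETH_ALEN] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };
	struct smt_entry *smte;

	smte = cxgb4_smt_alloc_switching(dev, smac);
	if (!smte)
		return -ENOMEM;

	/* smte->idx would be programmed into the filter work request;
	 * when the filter is later removed, the reference is dropped:
	 */
	cxgb4_smt_release(smte);
	return 0;
}
#endif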