// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020, NXP Semiconductors
 */
#include "sja1105.h"

/* Find the flower rule previously installed under @cookie.
 * Returns NULL when no rule with this cookie exists.
 */
static struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
					      unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}

/* Return the index of the first L2 policer not yet claimed by any rule
 * (and not reserved at setup time), or -1 when all policers are in use.
 */
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}

/* Install a broadcast policer on @port, or attach @port to an existing
 * shared broadcast policer identified by @cookie.
 *
 * The L2 policing table is laid out (as the indexing below shows) with
 * SJA1105_NUM_PORTS * SJA1105_NUM_TC per-{port,TC} entries first, followed
 * by one broadcast entry per port. An entry whose sharindx still equals the
 * port number is treated as "no policer installed" by the -EEXIST check.
 *
 * The newly allocated policer index is only committed to
 * flow_block.l2_policer_used (and the rule added to the list) after the
 * static config reload succeeds; on failure a newly allocated rule is freed.
 *
 * Returns 0 on success or a negative error code (with extack set).
 */
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       s64 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		/* First port for this cookie: allocate rule + hw policer */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* sharindx == port means the port's broadcast entry is still at its
	 * default (unpoliced) setting; anything else means a policer is
	 * already attached.
	 */
	if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

	/* Scale bytes/s into the switch's RATE field units (factor
	 * 512/10^6) — NOTE(review): unit taken from the hardware datasheet,
	 * confirm against UM10944.
	 */
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	/* smax = rate (bytes/s) * burst duration (psched ticks converted
	 * back to seconds) = burst size in bytes.
	 */
	policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
							  PSCHED_NS2TICKS(burst),
							  PSCHED_TICKS_PER_SEC);
	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		/* Commit: mark the policer busy and publish the rule */
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

/* Install a policer for traffic class @tc on @port, or attach @port to an
 * existing shared per-TC policer identified by @cookie.
 *
 * Mirrors sja1105_setup_bcast_policer(), but operates on the per-{port,TC}
 * region of the L2 policing table (index port * SJA1105_NUM_TC + tc).
 *
 * Returns 0 on success or a negative error code (with extack set).
 */
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    s64 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		/* First port for this cookie: allocate rule + hw policer */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->tc_pol.tc = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	/* sharindx == port is the default (unpoliced) setting for this
	 * {port, tc} entry; anything else means a policer already exists.
	 */
	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	/* Rate/burst conversions as in sja1105_setup_bcast_policer() */
	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
						       PSCHED_NS2TICKS(burst),
						       PSCHED_TICKS_PER_SEC);
	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		/* Commit: mark the policer busy and publish the rule */
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

/* Translate a flower classifier + police action into one of the two policer
 * types the hardware supports:
 *  - broadcast DMAC match        -> per-port broadcast policer
 *  - VLAN PCP match (exact mask) -> per-{port, traffic class} policer
 * Everything else is rejected with -EOPNOTSUPP and an extack message.
 */
static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
					struct netlink_ext_ack *extack,
					struct flow_cls_offload *cls,
					u64 rate_bytes_per_sec,
					s64 burst)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		/* Any EtherType match at all is unsupported */
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* SMAC must be fully wildcarded (masked key == 0) */
		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		/* DMAC must match the broadcast address under its mask */
		if (!ether_addr_equal_masked(match.key->dst, bcast,
					     match.mask->dst)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on broadcast DMAC is supported");
			return -EOPNOTSUPP;
		}

		return sja1105_setup_bcast_policer(priv, extack, cls->cookie,
						   port, rate_bytes_per_sec,
						   burst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		/* Reject any attempt to match on VLAN ID bits */
		if (match.key->vlan_id & match.mask->vlan_id) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		/* PCP must be matched exactly (full 3-bit mask) */
		if (match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		return sja1105_setup_tc_policer(priv, extack, cls->cookie, port,
						match.key->vlan_priority,
						rate_bytes_per_sec,
						burst);
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}

/* DSA .cls_flower_add hook: walk the rule's actions and offload the ones we
 * support (currently only FLOW_ACTION_POLICE). @ingress is accepted but not
 * consulted here. Returns the result of the last recognized action, or
 * -EOPNOTSUPP if none was.
 */
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	int rc = -EOPNOTSUPP, i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_flower_parse_policer(priv, port, extack, cls,
							  act->police.rate_bytes_ps,
							  act->police.burst);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			break;
		}
	}

	return rc;
}

/* DSA .cls_flower_del hook: detach @port from the rule identified by the
 * cookie, restoring the port's policing entry to its default sharindx
 * (its own port number). When the last port detaches, the hardware policer
 * is released and the rule freed. Finally the static config is reloaded.
 */
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		int index = (port * SJA1105_NUM_TC) + rule->tc_pol.tc;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		/* Last user gone: release the hw policer and the rule */
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

/* Initialize flower offload state: empty rule list, and reserve the first
 * SJA1105_NUM_PORTS policers, which serve as each port's default policer
 * (the sharindx == port convention relied on above).
 */
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < SJA1105_NUM_PORTS; port++)
		priv->flow_block.l2_policer_used[port] = true;
}

/* Free every remaining rule on switch teardown. Hardware state is not
 * touched here — the switch is going away.
 */
void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}