// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"

static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
	return (struct nfp_flower_cmsg_hdr *)skb->data;
}

struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
		      enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
	struct nfp_flower_cmsg_hdr *ch;
	struct sk_buff *skb;

	size += NFP_FLOWER_CMSG_HLEN;

	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
	if (!skb)
		return NULL;

	ch = nfp_flower_cmsg_get_hdr(skb);
	ch->pad = 0;
	ch->version = NFP_FLOWER_CMSG_VER1;
	ch->type = type;
	skb_put(skb, size);

	return skb;
}

struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
{
	struct nfp_flower_cmsg_mac_repr *msg;
	struct sk_buff *skb;
	unsigned int size;

	size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
				    GFP_KERNEL);
	if (!skb)
		return NULL;

	msg = nfp_flower_cmsg_get_data(skb);
	memset(msg->reserved, 0, sizeof(msg->reserved));
	msg->num_ports = num_ports;

	return skb;
}

void
nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
			     unsigned int nbi, unsigned int nbi_port,
			     unsigned int phys_port)
{
	struct nfp_flower_cmsg_mac_repr *msg;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->ports[idx].idx = idx;
	msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
	msg->ports[idx].nbi_port = nbi_port;
	msg->ports[idx].phys_port = phys_port;
}

int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
			    unsigned int mtu, bool mtu_only)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = carrier_ok;

	if (mtu_only)
		msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;

	msg->mtu = cpu_to_be16(mtu);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
{
	struct nfp_flower_cmsg_portreify *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	msg->info = cpu_to_be16(exists);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

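/* Check whether a PORT_MOD message acks an MTU-only change requested by the
 * driver. If the port and MTU match the outstanding request, record the ack
 * and wake any waiter. Returns true if the message was consumed as an MTU
 * ack, false otherwise.
 */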
static bool
nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_portmod *msg;

	msg = nfp_flower_cmsg_get_data(skb);

	if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
		return false;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	if (!app_priv->mtu_conf.requested_val ||
	    app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
	    be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
		/* Not an ack for requested MTU change. */
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return false;
	}

	app_priv->mtu_conf.ack = true;
	app_priv->mtu_conf.requested_val = 0;
	wake_up(&app_priv->mtu_conf.wait_q);
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return true;
}

static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct net_device *netdev;
	bool link;

	msg = nfp_flower_cmsg_get_data(skb);
	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

	rtnl_lock();
	rcu_read_lock();
	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!netdev) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		rtnl_unlock();
		return;
	}

	if (link) {
		u16 mtu = be16_to_cpu(msg->mtu);

		netif_carrier_on(netdev);

		/* An MTU of 0 from the firmware should be ignored */
		if (mtu)
			dev_set_mtu(netdev, mtu);
	} else {
		netif_carrier_off(netdev);
	}
	rtnl_unlock();
}

static void
nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_cmsg_portreify *msg;
	bool exists;

	msg = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	rcu_read_unlock();
	if (!exists) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		return;
	}

	atomic_inc(&priv->reify_replies);
	wake_up_interruptible(&priv->reify_wait_queue);
}

static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;
	bool skb_stored = false;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
		nfp_flower_cmsg_portreify_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
			break;
		}
		/* fall through */
	default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
		goto out;
	}

	if (!skb_stored)
		dev_consume_skb_any(skb);
	return;
out:
	dev_kfree_skb_any(skb);
}

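/* Work handler for deferred control message processing. Splice the
 * high-priority queue ahead of the low-priority queue into a local list,
 * then handle each message in order.
 */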
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
	struct sk_buff_head cmsg_joined;
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
	skb_queue_head_init(&cmsg_joined);

	spin_lock_bh(&priv->cmsg_skbs_high.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_high.lock);

	spin_lock_bh(&priv->cmsg_skbs_low.lock);
	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
	spin_unlock_bh(&priv->cmsg_skbs_low.lock);

	while ((skb = __skb_dequeue(&cmsg_joined)))
		nfp_flower_cmsg_process_one_rx(priv->app, skb);
}

static void
nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff_head *skb_head;

	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
		skb_head = &priv->cmsg_skbs_high;
	else
		skb_head = &priv->cmsg_skbs_low;

	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_tail(skb_head, skb);
	schedule_work(&priv->cmsg_work);
}

void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_hdr *cmsg_hdr;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
				     cmsg_hdr->version);
		dev_kfree_skb_any(skb);
		return;
	}

	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
		/* We need to deal with stats updates from HW asap */
		nfp_flower_rx_flow_stats(app, skb);
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
		   nfp_flower_process_mtu_ack(app, skb)) {
		/* Handle MTU acks outside wq to prevent RTNL conflict. */
		dev_consume_skb_any(skb);
	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
		/* Acks from the NFP that the route is added - ignore. */
		dev_consume_skb_any(skb);
	} else {
		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
	}
}