// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}
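/* Representor NAPI is driven purely by software, not by a hardware
 * event queue: efx_ef100_rep_rx_packet() (below) queues skbs delivered
 * by the parent PF's datapath and schedules the NAPI context added in
 * efx_ef100_rep_open(), and efx_ef100_rep_poll() drains them via the
 * read_index/write_index "fake-interrupt" handshake.
 */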
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped.  The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}

static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
				  enum tc_setup_type type, void *type_data)
{
	struct efx_rep *efv = netdev_priv(net_dev);
	struct efx_nic *efx = efv->parent;

	if (type == TC_SETUP_CLSFLOWER)
		return efx_tc_flower(efx, net_dev, type_data, efv);
	if (type == TC_SETUP_BLOCK)
		return efx_tc_setup_block(net_dev, efx, type_data, efv);

	return -EOPNOTSUPP;
}

static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

static const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
	.ndo_setup_tc		= efx_ef100_rep_setup_tc,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}
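/* The ring exposed through ethtool is a pseudo-ring: efv->rx_pring_size
 * is merely a cap on how many skbs may be queued on efv->rx_list
 * awaiting NAPI (enforced in efx_ef100_rep_rx_packet()).  There is no
 * hardware limit, hence rx_max_pending is reported as U32_MAX and any
 * rx_pending value is accepted below.
 */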
static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;

	efv->rx_pring_size = ring->rx_pending;
	return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}
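/* Note: the synchronize_rcu() below pairs with the rcu_read_lock()
 * that callers of efx_ef100_find_rep_by_mport() are required to hold
 * (see the comment in that function): once the rep has been removed
 * from efx->vf_reps, wait for any such reader to finish before the
 * netdev it may have looked up is freed.
 */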
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	synchronize_rcu();
	free_netdev(efv->net_dev);
}

int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt.  In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}
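/* Producer side of the fake-interrupt scheme drained by
 * efx_ef100_rep_poll() above; called from the parent PF's RX datapath
 * for packets whose ingress m-port identifies a representor.  A
 * minimal sketch of the expected caller pairing, using only helpers
 * defined in this file (the real call site lives in the datapath code
 * elsewhere in the driver):
 *
 *	rcu_read_lock();
 *	efv = efx_ef100_find_rep_by_mport(efx, mport);
 *	if (efv)
 *		efx_ef100_rep_rx_packet(efv, rx_buf);
 *	rcu_read_unlock();
 */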
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}

struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}