// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_repr_get_sw_port_id - get port ID associated with representor
 * @repr: pointer to port representor
 */
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
	return repr->vf->pf->hw.port_info->lport;
}

/**
 * ice_repr_get_phys_port_name - get phys port name
 * @netdev: pointer to port representor netdev
 * @buf: write here port name
 * @len: max length of buf
 */
static int
ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	int res;

	/* Devlink port is registered and devlink core is taking care of name formatting. */
	if (repr->vf->devlink_port.devlink)
		return -EOPNOTSUPP;

	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
		       repr->vf->vf_id);
	if (res <= 0)
		return -EOPNOTSUPP;
	return 0;
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is de-activated by the system. Corresponding
 * VF is notified about link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 *
 * RX/TX stats are being swapped here to be consistent with VF stats. In slow
 * path, port representor receives data when the corresponding VF is sending it
 * (and vice versa), TX and RX bytes/packets are effectively swapped on port
 * representor.
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	int vf_id = np->repr->vf->vf_id;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u64 pkts, bytes;

	tx_ring = np->vsi->tx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
				     tx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;

	rx_ring = np->vsi->rx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
				     rx_ring->ring_stats->stats,
				     &pkts, &bytes);
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
			    rx_ring->ring_stats->rx_stats.alloc_buf_failed;

	return 0;
}

/* Port representors only report CPU-hit (slow path) offload stats */
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

/* Offload TC flower classifier rules onto the representor's source VSI */
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

/**
 * ice_repr_add - add representor for VF
 * @vf: pointer to VF structure
 */
static int ice_repr_add(struct ice_vf *vf)
{
	struct ice_q_vector *q_vector;
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return -ENOMEM;

#ifdef CONFIG_ICE_SWITCHDEV
	repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
	if (!repr->mac_rule) {
		err = -ENOMEM;
		goto err_alloc_rule;
	}
#endif

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->src_vsi = vsi;
	repr->vf = vf;
	vf->repr = repr;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector) {
		err = -ENOMEM;
		goto err_alloc_q_vector;
	}
	repr->q_vector = q_vector;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		goto err_devlink;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);

	return 0;

err_netdev:
	ice_devlink_destroy_vf_port(vf);
err_devlink:
	kfree(repr->q_vector);
	vf->repr->q_vector = NULL;
err_alloc_q_vector:
	free_netdev(repr->netdev);
	repr->netdev = NULL;
err_alloc:
#ifdef CONFIG_ICE_SWITCHDEV
	kfree(repr->mac_rule);
	repr->mac_rule = NULL;
err_alloc_rule:
#endif
	kfree(repr);
	vf->repr = NULL;
	return err;
}

/**
 * ice_repr_rem - remove representor from VF
 * @vf: pointer to VF structure
 */
static void ice_repr_rem(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	kfree(vf->repr->q_vector);
	vf->repr->q_vector = NULL;
	unregister_netdev(vf->repr->netdev);
	ice_devlink_destroy_vf_port(vf);
	free_netdev(vf->repr->netdev);
	vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
	kfree(vf->repr->mac_rule);
	vf->repr->mac_rule = NULL;
#endif
	kfree(vf->repr);
	vf->repr = NULL;

	ice_virtchnl_set_dflt_ops(vf);
}

/**
 * ice_repr_rem_from_all_vfs - remove port representor for all VFs
 * @pf: pointer to PF structure
 */
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		ice_repr_rem(vf);

	/* since all port representors are destroyed, there is
	 * no point in keeping the nodes
	 */
	devlink = priv_to_devlink(pf);
	devl_lock(devlink);
	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_repr_add_for_all_vfs - add port representor for all VFs
 * @pf: pointer to PF structure
 */
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		err = ice_repr_add(vf);
		if (err)
			goto err;
	}

	/* only export if ADQ and DCB disabled */
	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
		return 0;

	devlink = priv_to_devlink(pf);
	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));

	return 0;

err:
	ice_repr_rem_from_all_vfs(pf);

	return err;
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}

/**
 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
 * @repr: repr on which VSI will be set
 * @vsi: pointer to VSI that will be used by port representor to pass traffic
 */
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(repr->netdev);

	np->vsi = vsi;
}