// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"

/**
 * ice_repr_get_sw_port_id - get port ID associated with representor
 * @repr: pointer to port representor
 */
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
	return repr->vf->pf->hw.port_info->lport;
}

/**
 * ice_repr_get_phys_port_name - get phys port name
 * @netdev: pointer to port representor netdev
 * @buf: buffer where the port name is written
 * @len: max length of buf
 */
static int
ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_repr *repr = np->repr;
	int res;

	/* Devlink port is registered and devlink core is taking care of name formatting. */
	if (repr->vf->devlink_port.devlink)
		return -EOPNOTSUPP;

	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
		       repr->vf->vf_id);
	if (res <= 0)
		return -EOPNOTSUPP;
	return 0;
}

/**
 * ice_repr_get_stats64 - get VF stats for VFPR use
 * @netdev: pointer to port representor netdev
 * @stats: pointer to struct where stats can be stored
 */
static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_eth_stats *eth_stats;
	struct ice_vsi *vsi;

	if (ice_is_vf_disabled(np->repr->vf))
		return;
	vsi = np->repr->src_vsi;

	ice_update_vsi_stats(vsi);
	eth_stats = &vsi->eth_stats;

	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
			    eth_stats->tx_multicast;
	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
			    eth_stats->rx_multicast;
	stats->tx_bytes = eth_stats->tx_bytes;
	stats->rx_bytes = eth_stats->rx_bytes;
	stats->multicast = eth_stats->rx_multicast;
	stats->tx_errors = eth_stats->tx_errors;
	stats->tx_dropped = eth_stats->tx_discards;
	stats->rx_dropped = eth_stats->rx_discards;
}

/**
 * ice_netdev_to_repr - Get port representor for given netdevice
 * @netdev: pointer to port representor netdev
 */
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->repr;
}

/**
 * ice_repr_open - Enable port representor's network interface
 * @netdev: network interface device structure
 *
 * The open entry point is called when a port representor's network
 * interface is made active by the system (IFF_UP). The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_open(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = true;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_stop - Disable port representor's network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when a port representor's network
 * interface is deactivated by the system. The corresponding
 * VF is notified about the link status change.
 *
 * Returns 0 on success
 */
static int ice_repr_stop(struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	struct ice_vf *vf;

	vf = repr->vf;
	vf->link_forced = true;
	vf->link_up = false;
	ice_vc_notify_vf_link_state(vf);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return 0;
}

/**
 * ice_repr_sp_stats64 - get slow path stats for port representor
 * @dev: network interface device structure
 * @stats: netlink stats structure
 *
 * RX/TX stats are being swapped here to be consistent with VF stats. In slow
 * path, the port representor receives data when the corresponding VF is
 * sending it (and vice versa), so TX and RX bytes/packets are effectively
 * swapped on the port representor.
 */
static int
ice_repr_sp_stats64(const struct net_device *dev,
		    struct rtnl_link_stats64 *stats)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	int vf_id = np->repr->vf->vf_id;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u64 pkts, bytes;

	tx_ring = np->vsi->tx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
				     &pkts, &bytes);
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;

	rx_ring = np->vsi->rx_rings[vf_id];
	ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
				     &pkts, &bytes);
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
			    rx_ring->rx_stats.alloc_buf_failed;

	return 0;
}

/**
 * ice_repr_ndo_has_offload_stats - check if an offload stats type is supported
 * @dev: pointer to port representor netdev
 * @attr_id: identifier of the requested stats type
 *
 * Only CPU-hit (slow path) stats are supported.
 */
static bool
ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

/**
 * ice_repr_ndo_get_offload_stats - get offload stats for port representor
 * @attr_id: identifier of the requested stats type
 * @dev: pointer to port representor netdev
 * @sp: buffer where the stats are written
 */
static int
ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

/**
 * ice_repr_setup_tc_cls_flower - add or delete a flower filter on representor
 * @repr: pointer to port representor
 * @flower: flower classifier offload command
 */
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
			     struct flow_cls_offload *flower)
{
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(repr->src_vsi, flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_repr_setup_tc_block_cb - TC block callback for port representor
 * @type: type of TC offload
 * @type_data: offload data
 * @cb_priv: netdev private data passed at block registration
 */
static int
ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			   void *cb_priv)
{
	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_repr_setup_tc_cls_flower(np->repr, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(ice_repr_block_cb_list);

/**
 * ice_repr_setup_tc - ndo_setup_tc callback for port representor
 * @netdev: pointer to port representor netdev
 * @type: type of TC offload
 * @type_data: offload data
 */
static int
ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple((struct flow_block_offload *)
						  type_data,
						  &ice_repr_block_cb_list,
						  ice_repr_setup_tc_block_cb,
						  np, np, true);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops ice_repr_netdev_ops = {
	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
	.ndo_get_stats64 = ice_repr_get_stats64,
	.ndo_open = ice_repr_open,
	.ndo_stop = ice_repr_stop,
	.ndo_start_xmit = ice_eswitch_port_start_xmit,
	.ndo_setup_tc = ice_repr_setup_tc,
	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};

/**
 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
 * @netdev: pointer to netdev
 */
bool ice_is_port_repr_netdev(struct net_device *netdev)
{
	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}

/**
 * ice_repr_reg_netdev - register port representor netdev
 * @netdev: pointer to port representor netdev
 */
static int
ice_repr_reg_netdev(struct net_device *netdev)
{
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &ice_repr_netdev_ops;
	ice_set_ethtool_repr_ops(netdev);

	netdev->hw_features |= NETIF_F_HW_TC;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	return register_netdev(netdev);
}

/**
 * ice_repr_add - add representor for VF
 * @vf: pointer to VF structure
 */
static int ice_repr_add(struct ice_vf *vf)
{
	struct ice_q_vector *q_vector;
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
	if (!repr)
		return -ENOMEM;

#ifdef CONFIG_ICE_SWITCHDEV
	repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
	if (!repr->mac_rule) {
		err = -ENOMEM;
		goto err_alloc_rule;
	}
#endif

	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	repr->src_vsi = vsi;
	repr->vf = vf;
	vf->repr = repr;
	np = netdev_priv(repr->netdev);
	np->repr = repr;

	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector) {
		err = -ENOMEM;
		goto err_alloc_q_vector;
	}
	repr->q_vector = q_vector;

	err = ice_devlink_create_vf_port(vf);
	if (err)
		goto err_devlink;

	repr->netdev->min_mtu = ETH_MIN_MTU;
	repr->netdev->max_mtu = ICE_MAX_MTU;

	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
	err = ice_repr_reg_netdev(repr->netdev);
	if (err)
		goto err_netdev;

	ice_virtchnl_set_repr_ops(vf);

	return 0;

err_netdev:
	ice_devlink_destroy_vf_port(vf);
err_devlink:
	kfree(repr->q_vector);
	vf->repr->q_vector = NULL;
err_alloc_q_vector:
	free_netdev(repr->netdev);
	repr->netdev = NULL;
err_alloc:
#ifdef CONFIG_ICE_SWITCHDEV
	kfree(repr->mac_rule);
	repr->mac_rule = NULL;
err_alloc_rule:
#endif
	kfree(repr);
	vf->repr = NULL;
	return err;
}

/**
 * ice_repr_rem - remove representor from VF
 * @vf: pointer to VF structure
 */
static void ice_repr_rem(struct ice_vf *vf)
{
	if (!vf->repr)
		return;

	kfree(vf->repr->q_vector);
	vf->repr->q_vector = NULL;
	unregister_netdev(vf->repr->netdev);
	ice_devlink_destroy_vf_port(vf);
	free_netdev(vf->repr->netdev);
	vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
	kfree(vf->repr->mac_rule);
	vf->repr->mac_rule = NULL;
#endif
	kfree(vf->repr);
	vf->repr = NULL;

	ice_virtchnl_set_dflt_ops(vf);
}

/**
 * ice_repr_rem_from_all_vfs - remove port representor for all VFs
 * @pf: pointer to PF structure
 */
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		ice_repr_rem(vf);
}

/**
 * ice_repr_add_for_all_vfs - add port representor for all VFs
 * @pf: pointer to PF structure
 */
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		err = ice_repr_add(vf);
		if (err)
			goto err;
	}

	return 0;

err:
	ice_repr_rem_from_all_vfs(pf);

	return err;
}

/**
 * ice_repr_start_tx_queues - start Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_start_tx_queues(struct ice_repr *repr)
{
	netif_carrier_on(repr->netdev);
	netif_tx_start_all_queues(repr->netdev);
}

/**
 * ice_repr_stop_tx_queues - stop Tx queues of port representor
 * @repr: pointer to repr structure
 */
void ice_repr_stop_tx_queues(struct ice_repr *repr)
{
	netif_carrier_off(repr->netdev);
	netif_tx_stop_all_queues(repr->netdev);
}

/**
 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
 * @repr: repr on which the VSI will be set
 * @vsi: pointer to VSI that will be used by port representor to pass traffic
 */
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(repr->netdev);

	np->vsi = vsi;
}