/* SPDX-License-Identifier: GPL-2.0+ */
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#ifndef __SPARX5_MAIN_H__
#define __SPARX5_MAIN_H__

#include <linux/types.h>
#include <linux/phy/phy.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>

#include "sparx5_main_regs.h"

/* Target chip type */
enum spx5_target_chiptype {
        SPX5_TARGET_CT_7546    = 0x7546,  /* SparX-5-64 Enterprise */
        SPX5_TARGET_CT_7549    = 0x7549,  /* SparX-5-90 Enterprise */
        SPX5_TARGET_CT_7552    = 0x7552,  /* SparX-5-128 Enterprise */
        SPX5_TARGET_CT_7556    = 0x7556,  /* SparX-5-160 Enterprise */
        SPX5_TARGET_CT_7558    = 0x7558,  /* SparX-5-200 Enterprise */
        SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
        SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
        SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
        SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
        SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
};

enum sparx5_port_max_tags {
        SPX5_PORT_MAX_TAGS_NONE, /* No extra tags allowed */
        SPX5_PORT_MAX_TAGS_ONE,  /* Single tag allowed */
        SPX5_PORT_MAX_TAGS_TWO   /* Single and double tag allowed */
};

enum sparx5_vlan_port_type {
        SPX5_VLAN_PORT_TYPE_UNAWARE,  /* VLAN unaware port */
        SPX5_VLAN_PORT_TYPE_C,        /* C-port */
        SPX5_VLAN_PORT_TYPE_S,        /* S-port */
        SPX5_VLAN_PORT_TYPE_S_CUSTOM  /* S-port using custom type */
};

#define SPX5_PORTS              65
#define SPX5_PORT_CPU           (SPX5_PORTS)        /* Next port is CPU port */
#define SPX5_PORT_CPU_0         (SPX5_PORT_CPU + 0) /* CPU Port 65 */
#define SPX5_PORT_CPU_1         (SPX5_PORT_CPU + 1) /* CPU Port 66 */
#define SPX5_PORT_VD0           (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
#define SPX5_PORT_VD1           (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
#define SPX5_PORT_VD2           (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP */
#define SPX5_PORTS_ALL          (SPX5_PORT_CPU + 5) /* Total number of ports */

#define PGID_BASE               SPX5_PORTS /* Starts after port PGIDs */
#define PGID_UC_FLOOD           (PGID_BASE + 0)
#define PGID_MC_FLOOD           (PGID_BASE + 1)
#define PGID_IPV4_MC_DATA       (PGID_BASE + 2)
#define PGID_IPV4_MC_CTRL       (PGID_BASE + 3)
#define PGID_IPV6_MC_DATA       (PGID_BASE + 4)
#define PGID_IPV6_MC_CTRL       (PGID_BASE + 5)
#define PGID_BCAST              (PGID_BASE + 6)
#define PGID_CPU                (PGID_BASE + 7)
#define PGID_MCAST_START        (PGID_BASE + 8)

#define PGID_TABLE_SIZE         3290

#define IFH_LEN                 9 /* 36 bytes */
#define NULL_VID                0
#define SPX5_MACT_PULL_DELAY    (2 * HZ)
#define SPX5_STATS_CHECK_DELAY  (1 * HZ)
#define SPX5_PRIOS              8       /* Number of priority queues */
#define SPX5_BUFFER_CELL_SZ     184     /* Cell size */
#define SPX5_BUFFER_MEMORY      4194280 /* 22795 words * 184 bytes */

#define XTR_QUEUE               0
#define INJ_QUEUE               0

#define FDMA_DCB_MAX            64
#define FDMA_RX_DCB_MAX_DBS     15
#define FDMA_TX_DCB_MAX_DBS     1

#define SPARX5_PHC_COUNT        3
#define SPARX5_PHC_PORT         0

#define IFH_REW_OP_NOOP                 0x0
#define IFH_REW_OP_ONE_STEP_PTP         0x3
#define IFH_REW_OP_TWO_STEP_PTP         0x4

#define IFH_PDU_TYPE_NONE               0x0
#define IFH_PDU_TYPE_PTP                0x5
#define IFH_PDU_TYPE_IPV4_UDP_PTP       0x6
#define IFH_PDU_TYPE_IPV6_UDP_PTP       0x7

struct sparx5;

struct sparx5_db_hw {
        u64 dataptr;
        u64 status;
};

struct sparx5_rx_dcb_hw {
        u64 nextptr;
        u64 info;
        struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS];
};

struct sparx5_tx_dcb_hw {
        u64 nextptr;
        u64 info;
        struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS];
};

/* Frame DMA receive state:
 * For each DB there is an skb, and the skb data pointer is mapped in
 * the DB. Once a frame is received, the skb is handed to the upper layers
 * and a new skb is added to the DCB.
 * When db_index reaches FDMA_RX_DCB_MAX_DBS, the DCB is reused.
 */
struct sparx5_rx {
        struct sparx5_rx_dcb_hw *dcb_entries;
        struct sparx5_rx_dcb_hw *last_entry;
        struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
        int db_index;
        int dcb_index;
        dma_addr_t dma;
        struct napi_struct napi;
        u32 channel_id;
        struct net_device *ndev;
        u64 packets;
};

/* Frame DMA transmit state:
 * DCBs are chained using the DCB's nextptr field.
 */
struct sparx5_tx {
        struct sparx5_tx_dcb_hw *curr_entry;
        struct sparx5_tx_dcb_hw *first_entry;
        struct list_head db_list;
        dma_addr_t dma;
        u32 channel_id;
        u64 packets;
        u64 dropped;
};
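/* Ring-walk sketch (illustrative only, not code from this driver): one
 * plausible way the RX indices above advance while draining data buffers,
 * matching the "DCB is reused" note in the RX comment. FDMA_DCB_MAX is a
 * power of two, so a mask can wrap the DCB ring:
 *
 *	rx->db_index++;
 *	if (rx->db_index >= FDMA_RX_DCB_MAX_DBS) {
 *		rx->db_index = 0;
 *		rx->dcb_index++;
 *		rx->dcb_index &= FDMA_DCB_MAX - 1;
 *	}
 */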
struct sparx5_port_config {
        phy_interface_t portmode;
        u32 bandwidth;
        int speed;
        int duplex;
        enum phy_media media;
        bool inband;
        bool power_down;
        bool autoneg;
        bool serdes_reset;
        u32 pause;
        u32 pause_adv;
        phy_interface_t phy_mode;
        u32 sd_sgpio;
};

struct sparx5_port {
        struct net_device *ndev;
        struct sparx5 *sparx5;
        struct device_node *of_node;
        struct phy *serdes;
        struct sparx5_port_config conf;
        struct phylink_config phylink_config;
        struct phylink *phylink;
        struct phylink_pcs phylink_pcs;
        u16 portno;
        /* Ingress default VLAN (pvid) */
        u16 pvid;
        /* Egress default VLAN (vid) */
        u16 vid;
        bool signd_internal;
        bool signd_active_high;
        bool signd_enable;
        bool flow_control;
        enum sparx5_port_max_tags max_vlan_tags;
        enum sparx5_vlan_port_type vlan_type;
        u32 custom_etype;
        bool vlan_aware;
        struct hrtimer inj_timer;
        /* ptp */
        u8 ptp_cmd;
        u16 ts_id;
        struct sk_buff_head tx_skbs;
        bool is_mrouter;
};

enum sparx5_core_clockfreq {
        SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
        SPX5_CORE_CLOCK_250MHZ,  /* 250MHz core clock frequency */
        SPX5_CORE_CLOCK_500MHZ,  /* 500MHz core clock frequency */
        SPX5_CORE_CLOCK_625MHZ,  /* 625MHz core clock frequency */
};

struct sparx5_phc {
        struct ptp_clock *clock;
        struct ptp_clock_info info;
        struct hwtstamp_config hwtstamp_config;
        struct sparx5 *sparx5;
        u8 index;
};

struct sparx5_skb_cb {
        u8 rew_op;
        u8 pdu_type;
        u8 pdu_w16_offset;
        u16 ts_id;
        unsigned long jiffies;
};

struct sparx5_mdb_entry {
        struct list_head list;
        DECLARE_BITMAP(port_mask, SPX5_PORTS);
        unsigned char addr[ETH_ALEN];
        bool cpu_copy;
        u16 vid;
        u16 pgid_idx;
};

#define SPARX5_PTP_TIMEOUT      msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
        ((struct sparx5_skb_cb *)((skb)->cb))
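/* Usage sketch (illustrative only, not code from this driver): the per-skb
 * PTP state above lives in the generic skb control buffer and is reached
 * through the SPARX5_SKB_CB() accessor, e.g.
 *
 *	SPARX5_SKB_CB(skb)->rew_op = IFH_REW_OP_TWO_STEP_PTP;
 *	SPARX5_SKB_CB(skb)->pdu_type = IFH_PDU_TYPE_PTP;
 *	SPARX5_SKB_CB(skb)->jiffies = jiffies;
 */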
struct sparx5 {
        struct platform_device *pdev;
        struct device *dev;
        u32 chip_id;
        enum spx5_target_chiptype target_ct;
        void __iomem *regs[NUM_TARGETS];
        int port_count;
        struct mutex lock; /* MAC reg lock */
        /* port structures are in net device */
        struct sparx5_port *ports[SPX5_PORTS];
        enum sparx5_core_clockfreq coreclock;
        /* Statistics */
        u32 num_stats;
        u32 num_ethtool_stats;
        const char * const *stats_layout;
        u64 *stats;
        /* Workqueue for reading stats */
        struct mutex queue_stats_lock;
        struct delayed_work stats_work;
        struct workqueue_struct *stats_queue;
        /* Notifiers */
        struct notifier_block netdevice_nb;
        struct notifier_block switchdev_nb;
        struct notifier_block switchdev_blocking_nb;
        /* Switch state */
        u8 base_mac[ETH_ALEN];
        /* Associated bridge device (when bridged) */
        struct net_device *hw_bridge_dev;
        /* Bridged interfaces */
        DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
        DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
        DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
        DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
        /* SW MAC table */
        struct list_head mact_entries;
        /* mac table list (mact_entries) mutex */
        struct mutex mact_lock;
        /* SW MDB table */
        struct list_head mdb_entries;
        /* mdb list mutex */
        struct mutex mdb_lock;
        struct delayed_work mact_work;
        struct workqueue_struct *mact_queue;
        /* Board specifics */
        bool sd_sgpio_remapping;
        /* Register based inj/xtr */
        int xtr_irq;
        /* Frame DMA */
        int fdma_irq;
        struct sparx5_rx rx;
        struct sparx5_tx tx;
        /* PTP */
        bool ptp;
        struct sparx5_phc phc[SPARX5_PHC_COUNT];
        spinlock_t ptp_clock_lock; /* lock for phc */
        spinlock_t ptp_ts_id_lock; /* lock for ts_id */
        struct mutex ptp_lock; /* lock for ptp interface state */
        u16 ptp_skbs;
        int ptp_irq;
        /* PGID allocation map */
        u8 pgid_map[PGID_TABLE_SIZE];
};

/* sparx5_switchdev.c */
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);

/* sparx5_packet.c */
struct frame_info {
        int src_port;
        u32 timestamp;
};

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);

/* sparx5_fdma.c */
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
irqreturn_t sparx5_fdma_handler(int irq, void *args);

/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
int sparx5_mact_learn(struct sparx5 *sparx5, int port,
                      const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
                         unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
int sparx5_mact_find(struct sparx5 *sparx5,
                     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
                       const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
                          struct net_device *dev,
                          u16 portno,
                          const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
                          const unsigned char *addr,
                          u16 vid);
int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
void sparx5_mact_init(struct sparx5 *sparx5);
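/* Usage sketch (illustrative only, not code from this driver): adding and
 * removing a static MAC table entry on the CPU port, using the PGID and
 * VID constants defined above; the address below is a made-up placeholder.
 *
 *	static const unsigned char example_mac[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	sparx5_mact_learn(sparx5, PGID_CPU, example_mac, NULL_VID);
 *	...
 *	sparx5_mact_forget(sparx5, example_mac, NULL_VID);
 */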
/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
                        bool untagged);
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);

/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);

/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int sparx_stats_init(struct sparx5 *sparx5);

/* sparx5_netdev.c */
void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);

/* sparx5_ptp.c */
int sparx5_ptp_init(struct sparx5 *sparx5);
void sparx5_ptp_deinit(struct sparx5 *sparx5);
int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr);
int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr);
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
                         u64 timestamp);
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
                                struct sk_buff *skb);
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
                                 struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);

/* sparx5_pgid.c */
enum sparx5_pgid_type {
        SPX5_PGID_FREE,
        SPX5_PGID_RESERVED,
        SPX5_PGID_MULTICAST,
};

void sparx5_pgid_init(struct sparx5 *spx5);
int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);

/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
        switch (cclock) {
        case SPX5_CORE_CLOCK_250MHZ:
                return 4000;
        case SPX5_CORE_CLOCK_500MHZ:
                return 2000;
        case SPX5_CORE_CLOCK_625MHZ:
        default:
                return 1600;
        }
}
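/* The periods above follow from t[ps] = 10^12 / f[Hz]:
 * 10^12 / 250e6 = 4000 ps, 10^12 / 500e6 = 2000 ps, 10^12 / 625e6 = 1600 ps.
 */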
static inline bool sparx5_is_baser(phy_interface_t interface)
{
        return interface == PHY_INTERFACE_MODE_5GBASER ||
               interface == PHY_INTERFACE_MODE_10GBASER ||
               interface == PHY_INTERFACE_MODE_25GBASER;
}

extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
extern const struct ethtool_ops sparx5_ethtool_ops;

/* Calculate raw offset */
static inline __pure int spx5_offset(int id, int tinst, int tcnt,
                                     int gbase, int ginst,
                                     int gcnt, int gwidth,
                                     int raddr, int rinst,
                                     int rcnt, int rwidth)
{
        WARN_ON((tinst) >= tcnt);
        WARN_ON((ginst) >= gcnt);
        WARN_ON((rinst) >= rcnt);
        return gbase + ((ginst) * gwidth) +
                raddr + ((rinst) * rwidth);
}

/* Read, write and modify register contents.
 * The register definition macros start at the id.
 */
static inline void __iomem *spx5_addr(void __iomem *base[],
                                      int id, int tinst, int tcnt,
                                      int gbase, int ginst,
                                      int gcnt, int gwidth,
                                      int raddr, int rinst,
                                      int rcnt, int rwidth)
{
        WARN_ON((tinst) >= tcnt);
        WARN_ON((ginst) >= gcnt);
        WARN_ON((rinst) >= rcnt);
        return base[id + (tinst)] +
                gbase + ((ginst) * gwidth) +
                raddr + ((rinst) * rwidth);
}

static inline void __iomem *spx5_inst_addr(void __iomem *base,
                                           int gbase, int ginst,
                                           int gcnt, int gwidth,
                                           int raddr, int rinst,
                                           int rcnt, int rwidth)
{
        WARN_ON((ginst) >= gcnt);
        WARN_ON((rinst) >= rcnt);
        return base +
                gbase + ((ginst) * gwidth) +
                raddr + ((rinst) * rwidth);
}

static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
                          int gbase, int ginst, int gcnt, int gwidth,
                          int raddr, int rinst, int rcnt, int rwidth)
{
        return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
                               gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
                               int gbase, int ginst, int gcnt, int gwidth,
                               int raddr, int rinst, int rcnt, int rwidth)
{
        return readl(spx5_inst_addr(iomem, gbase, ginst,
                                    gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
                           int id, int tinst, int tcnt,
                           int gbase, int ginst, int gcnt, int gwidth,
                           int raddr, int rinst, int rcnt, int rwidth)
{
        writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
                              gbase, ginst, gcnt, gwidth,
                              raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
                                int id, int tinst, int tcnt,
                                int gbase, int ginst, int gcnt, int gwidth,
                                int raddr, int rinst, int rcnt, int rwidth)
{
        writel(val, spx5_inst_addr(iomem,
                                   gbase, ginst, gcnt, gwidth,
                                   raddr, rinst, rcnt, rwidth));
}

static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
                            int id, int tinst, int tcnt,
                            int gbase, int ginst, int gcnt, int gwidth,
                            int raddr, int rinst, int rcnt, int rwidth)
{
        u32 nval;

        nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
                               gcnt, gwidth, raddr, rinst, rcnt, rwidth));
        nval = (nval & ~mask) | (val & mask);
        writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
                               gcnt, gwidth, raddr, rinst, rcnt, rwidth));
}

static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
                                 int id, int tinst, int tcnt,
                                 int gbase, int ginst, int gcnt, int gwidth,
                                 int raddr, int rinst, int rcnt, int rwidth)
{
        u32 nval;

        nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
                                    rinst, rcnt, rwidth));
        nval = (nval & ~mask) | (val & mask);
        writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
                                    rinst, rcnt, rwidth));
}
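/* Usage sketch (illustrative only): the register macros generated in
 * sparx5_main_regs.h expand to the full id/tinst/.../rwidth argument list,
 * so callers pass a single macro to the accessors above. REG_EXAMPLE and
 * FIELD_EXAMPLE are made-up placeholders, not macros from this driver:
 *
 *	u32 val = spx5_rd(sparx5, REG_EXAMPLE(portno));
 *
 *	spx5_rmw(FIELD_EXAMPLE_SET(1), FIELD_EXAMPLE, sparx5,
 *		 REG_EXAMPLE(portno));
 */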
static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
{
        return sparx5->regs[id + tinst];
}

static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
                                         int id, int tinst, int tcnt,
                                         int gbase, int ginst, int gcnt, int gwidth,
                                         int raddr, int rinst, int rcnt, int rwidth)
{
        return spx5_addr(sparx5->regs, id, tinst, tcnt,
                         gbase, ginst, gcnt, gwidth,
                         raddr, rinst, rcnt, rwidth);
}

#endif /* __SPARX5_MAIN_H__ */