// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <asm/unaligned.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

#define XTR_VALID_BYTES(x) (4 - ((x) & 3))

#define INJ_TIMEOUT_NS 50000

struct frame_info {
        int src_port;
};

static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
        /* Start flush */
        spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

        /* Allow it to drain */
        mdelay(1);

        /* All queues back to normal */
        spx5_wr(0, sparx5, QS_XTR_FLUSH);
}
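/* Only the source port is needed from the 36-byte IFH here. A worked
 * sketch of the arithmetic below (assuming IFH bit 0 is the LSB of the
 * last IFH byte): bytes 27..30 hold IFH bits 71..40, the 5-bit right
 * shift leaves the 27 LSB of the FWD field (IFH bits 71..45) at
 * positions 26..0, and GENMASK(7, 1) then picks the 7-bit source port
 * sub-field out of FWD.
 */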
static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
{
        u8 *xtr_hdr = (u8 *)ifh;

        /* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
        u32 fwd =
                ((u32)xtr_hdr[27] << 24) |
                ((u32)xtr_hdr[28] << 16) |
                ((u32)xtr_hdr[29] <<  8) |
                ((u32)xtr_hdr[30] <<  0);
        fwd = (fwd >> 5);
        info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
}

static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
        bool eof_flag = false, pruned_flag = false, abort_flag = false;
        struct net_device *netdev;
        struct sparx5_port *port;
        struct frame_info fi;
        int i, byte_cnt = 0;
        struct sk_buff *skb;
        u32 ifh[IFH_LEN];
        u32 *rxbuf;

        /* Get IFH */
        for (i = 0; i < IFH_LEN; i++)
                ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

        /* Decode IFH (only what is needed) */
        sparx5_ifh_parse(ifh, &fi);

        /* Map to port netdev */
        port = fi.src_port < SPX5_PORTS ?
                sparx5->ports[fi.src_port] : NULL;
        if (!port || !port->ndev) {
                dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
                sparx5_xtr_flush(sparx5, grp);
                return;
        }

        /* Have netdev, get skb */
        netdev = port->ndev;
        skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
        if (!skb) {
                sparx5_xtr_flush(sparx5, grp);
                dev_err(sparx5->dev, "No skb allocated\n");
                netdev->stats.rx_dropped++;
                return;
        }
        rxbuf = (u32 *)skb->data;

        /* Now, pull frame data */
        while (!eof_flag) {
                u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
                u32 cmp = val;

                if (byte_swap)
                        cmp = ntohl((__force __be32)val);

                switch (cmp) {
                case XTR_NOT_READY:
                        break;
                case XTR_ABORT:
                        /* No accompanying data */
                        abort_flag = true;
                        eof_flag = true;
                        break;
                case XTR_EOF_0:
                case XTR_EOF_1:
                case XTR_EOF_2:
                case XTR_EOF_3:
                        /* This assumes STATUS_WORD_POS == 1, Status
                         * just after last data
                         */
                        byte_cnt -= (4 - XTR_VALID_BYTES(val));
                        eof_flag = true;
                        break;
                case XTR_PRUNED:
                        /* But get the last 4 bytes as well */
                        eof_flag = true;
                        pruned_flag = true;
                        fallthrough;
                case XTR_ESCAPE:
                        *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
                        byte_cnt += 4;
                        rxbuf++;
                        break;
                default:
                        *rxbuf = val;
                        byte_cnt += 4;
                        rxbuf++;
                }
        }

        if (abort_flag || pruned_flag || !eof_flag) {
                netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
                           abort_flag, pruned_flag, eof_flag);
                kfree_skb(skb);
                netdev->stats.rx_dropped++;
                return;
        }

        /* Everything we see on an interface that is in the HW bridge
         * has already been forwarded
         */
        if (test_bit(port->portno, sparx5->bridge_mask))
                skb->offload_fwd_mark = 1;

        /* Finish up skb */
        skb_put(skb, byte_cnt - ETH_FCS_LEN);
        eth_skb_pad(skb);
        skb->protocol = eth_type_trans(skb, netdev);
        netif_rx(skb);
        netdev->stats.rx_bytes += skb->len;
        netdev->stats.rx_packets++;
}
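/* Manual injection follows the register sequence that
 * sparx5_manual_injection_mode() below configures: signal SOF, write the
 * IFH words, write the frame data one 32-bit word at a time, pad to the
 * 60-byte minimum frame size, then signal EOF with the number of valid
 * bytes in the last word, followed by one dummy word in place of the FCS.
 */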
static int sparx5_inject(struct sparx5 *sparx5,
                         u32 *ifh,
                         struct sk_buff *skb,
                         struct net_device *ndev)
{
        int grp = INJ_QUEUE;
        u32 val, w, count;
        u8 *buf;

        val = spx5_rd(sparx5, QS_INJ_STATUS);
        if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
                pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
                                   QS_INJ_STATUS_FIFO_RDY_GET(val));
                return -EBUSY;
        }

        /* Indicate SOF */
        spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
                QS_INJ_CTRL_GAP_SIZE_SET(1),
                sparx5, QS_INJ_CTRL(grp));

        /* Write the IFH to the chip */
        for (w = 0; w < IFH_LEN; w++)
                spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

        /* Write frame data in words, round up */
        count = DIV_ROUND_UP(skb->len, 4);
        buf = skb->data;
        for (w = 0; w < count; w++, buf += 4) {
                val = get_unaligned((const u32 *)buf);
                spx5_wr(val, sparx5, QS_INJ_WR(grp));
        }

        /* Add padding up to the 60-byte minimum frame size */
        while (w < (60 / 4)) {
                spx5_wr(0, sparx5, QS_INJ_WR(grp));
                w++;
        }

        /* Indicate EOF and valid bytes in the last word */
        spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
                QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
                QS_INJ_CTRL_EOF_SET(1),
                sparx5, QS_INJ_CTRL(grp));

        /* Add dummy CRC */
        spx5_wr(0, sparx5, QS_INJ_WR(grp));
        w++;

        val = spx5_rd(sparx5, QS_INJ_STATUS);
        if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
                struct sparx5_port *port = netdev_priv(ndev);

                pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
                                   QS_INJ_STATUS_WMARK_REACHED_GET(val));
                netif_stop_queue(ndev);
                hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
                              HRTIMER_MODE_REL);
        }

        return NETDEV_TX_OK;
}

int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device_stats *stats = &dev->stats;
        struct sparx5_port *port = netdev_priv(dev);
        struct sparx5 *sparx5 = port->sparx5;
        int ret;

        ret = sparx5_inject(sparx5, port->ifh, skb, dev);

        if (ret == NETDEV_TX_OK) {
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
                skb_tx_timestamp(skb);
                dev_kfree_skb_any(skb);
        } else {
                stats->tx_dropped++;
        }
        return ret;
}

static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
        struct sparx5_port *port = container_of(tmr, struct sparx5_port,
                                                inj_timer);
        int grp = INJ_QUEUE;
        u32 val;

        val = spx5_rd(port->sparx5, QS_INJ_STATUS);
        if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
                pr_err_ratelimited("Injection: Reset watermark count\n");
                /* Reset watermark count to restart */
                spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
                         DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
                         port->sparx5,
                         DSM_DEV_TX_STOP_WM_CFG(port->portno));
        }
        netif_wake_queue(port->ndev);
        return HRTIMER_NORESTART;
}

int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
        const int byte_swap = 1;
        int portno;

        /* Change mode to manual extraction and injection */
        spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
                QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
                QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
                sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
        spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
                QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
                sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

        /* CPU ports capture setup */
        for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
                /* ASM CPU port: no preamble, IFH, enable padding */
                spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
                        ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
                        ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
                        sparx5, ASM_PORT_CFG(portno));

                /* Reset WM count to unclog queued frames */
                spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
                         DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
                         sparx5,
                         DSM_DEV_TX_STOP_WM_CFG(portno));

                /* Set Disassembler Stop Watermark level */
                spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
                         DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
                         sparx5,
                         DSM_DEV_TX_STOP_WM_CFG(portno));

                /* Enable Disassembler buffer underrun watchdog */
                spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
                         DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
                         sparx5,
                         DSM_BUF_CFG(portno));
        }
        return 0;
}
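/* The extraction IRQ is serviced by polling the data-present status for a
 * bounded number of frames (64 here), so a burst of received frames cannot
 * keep the handler running indefinitely.
 */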
irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
        struct sparx5 *s5 = _sparx5;
        int poll = 64;

        /* Check data in queue */
        while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
                sparx5_xtr_grp(s5, XTR_QUEUE, false);

        return IRQ_HANDLED;
}

void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
        hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        port->inj_timer.function = sparx5_injection_timeout;
}
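/* A minimal wiring sketch (hypothetical: the registration sites live in the
 * driver's probe and port-init code elsewhere, and the sparx5->xtr_irq field
 * name is assumed here, not taken from this file):
 *
 *        err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
 *                               sparx5_xtr_handler, IRQF_SHARED,
 *                               "sparx5-xtr", sparx5);
 *        sparx5_manual_injection_mode(sparx5);
 *
 * and per port, with sparx5_port_xmit_impl() called from the netdev's
 * ndo_start_xmit hook:
 *
 *        sparx5_port_inj_timer_setup(port);
 */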