// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

#define XTR_VALID_BYTES(x)  (4 - ((x) & 3))

#define INJ_TIMEOUT_NS 50000

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
	/* Start flush */
	spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	spx5_wr(0, sparx5, QS_XTR_FLUSH);
}

void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
{
	u8 *xtr_hdr = (u8 *)ifh;

	/* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
	u32 fwd =
		((u32)xtr_hdr[27] << 24) |
		((u32)xtr_hdr[28] << 16) |
		((u32)xtr_hdr[29] <<  8) |
		((u32)xtr_hdr[30] <<  0);
	fwd = (fwd >> 5);
	info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
}

static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (what's needed) */
	sparx5_ifh_parse(ifh, &fi);

	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ?
		sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, get skb */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Now, pull frame data */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break;
		case XTR_ABORT:
			/* No accompanying data */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* This assumes STATUS_WORD_POS == 1, Status
			 * just after last data
			 */
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	eth_skb_pad(skb);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}

static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip. */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write words, round up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Add padding */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and valid bytes in last word */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ?
					  0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}

int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, port->ifh, skb);
	else
		ret = sparx5_inject(sparx5, port->ifh, skb, dev);

	if (ret == NETDEV_TX_OK) {
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		skb_tx_timestamp(skb);
		dev_kfree_skb_any(skb);
	} else {
		stats->tx_dropped++;
	}
	return ret;
}

static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
	struct sparx5_port *port = container_of(tmr, struct sparx5_port,
						inj_timer);
	int grp = INJ_QUEUE;
	u32 val;

	val = spx5_rd(port->sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		pr_err_ratelimited("Injection: Reset watermark count\n");
		/* Reset Watermark count to restart */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 port->sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));
	}
	netif_wake_queue(port->ndev);
	return HRTIMER_NORESTART;
}

int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable Disassembler buffer underrun watchdog
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}

irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
	struct sparx5 *s5 = _sparx5;
	int poll = 64;

	/* Check data in queue */
	while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
		sparx5_xtr_grp(s5, XTR_QUEUE, false);

	return IRQ_HANDLED;
}

void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}