/*
 * QEMU Cadence GEM emulation
 *
 * Copyright (c) 2011 Xilinx, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h> /* For crc32 */

#include "hw/irq.h"
#include "hw/net/cadence_gem.h"
#include "hw/qdev-properties.h"
#include "hw/registerfields.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "sysemu/dma.h"
#include "net/checksum.h"
#include "net/eth.h"

#define CADENCE_GEM_ERR_DEBUG 0
#define DB_PRINT(...) do {\
    if (CADENCE_GEM_ERR_DEBUG) {   \
        qemu_log(": %s: ", __func__); \
        qemu_log(__VA_ARGS__); \
    } \
} while (0)

REG32(NWCTRL, 0x0) /* Network Control reg */
    FIELD(NWCTRL, LOOPBACK, 0, 1)
    FIELD(NWCTRL, LOOPBACK_LOCAL, 1, 1)
    FIELD(NWCTRL, ENABLE_RECEIVE, 2, 1)
    FIELD(NWCTRL, ENABLE_TRANSMIT, 3, 1)
    FIELD(NWCTRL, MAN_PORT_EN, 4, 1)
    FIELD(NWCTRL, CLEAR_ALL_STATS_REGS, 5, 1)
    FIELD(NWCTRL, INC_ALL_STATS_REGS, 6, 1)
    FIELD(NWCTRL, STATS_WRITE_EN, 7, 1)
    FIELD(NWCTRL, BACK_PRESSURE, 8, 1)
    FIELD(NWCTRL, TRANSMIT_START, 9, 1)
    FIELD(NWCTRL, TRANSMIT_HALT, 10, 1)
    FIELD(NWCTRL, TX_PAUSE_FRAME_RE, 11, 1)
    FIELD(NWCTRL, TX_PAUSE_FRAME_ZE, 12, 1)
    FIELD(NWCTRL, STATS_TAKE_SNAP, 13, 1)
    FIELD(NWCTRL, STATS_READ_SNAP, 14, 1)
    FIELD(NWCTRL, STORE_RX_TS, 15, 1)
    FIELD(NWCTRL, PFC_ENABLE, 16, 1)
    FIELD(NWCTRL, PFC_PRIO_BASED, 17, 1)
    FIELD(NWCTRL, FLUSH_RX_PKT_PCLK, 18, 1)
    FIELD(NWCTRL, TX_LPI_EN, 19, 1)
    FIELD(NWCTRL, PTP_UNICAST_ENA, 20, 1)
    FIELD(NWCTRL, ALT_SGMII_MODE, 21, 1)
    FIELD(NWCTRL, STORE_UDP_OFFSET, 22, 1)
    FIELD(NWCTRL, EXT_TSU_PORT_EN, 23, 1)
    FIELD(NWCTRL, ONE_STEP_SYNC_MO, 24, 1)
    FIELD(NWCTRL, PFC_CTRL, 25, 1)
    FIELD(NWCTRL, EXT_RXQ_SEL_EN, 26, 1)
    FIELD(NWCTRL, OSS_CORRECTION_FIELD, 27, 1)
    FIELD(NWCTRL, SEL_MII_ON_RGMII, 28, 1)
    FIELD(NWCTRL, TWO_PT_FIVE_GIG, 29, 1)
    FIELD(NWCTRL, IFG_EATS_QAV_CREDIT, 30, 1)

REG32(NWCFG, 0x4) /* Network Config reg */
    FIELD(NWCFG, SPEED, 0, 1)
    FIELD(NWCFG, FULL_DUPLEX, 1, 1)
    FIELD(NWCFG, DISCARD_NON_VLAN_FRAMES, 2, 1)
    FIELD(NWCFG, JUMBO_FRAMES, 3, 1)
    FIELD(NWCFG, PROMISC, 4, 1)
    FIELD(NWCFG, NO_BROADCAST, 5, 1)
    FIELD(NWCFG, MULTICAST_HASH_EN, 6, 1)
    FIELD(NWCFG, UNICAST_HASH_EN, 7, 1)
    FIELD(NWCFG, RECV_1536_BYTE_FRAMES, 8, 1)
    FIELD(NWCFG, EXTERNAL_ADDR_MATCH_EN, 9, 1)
    FIELD(NWCFG, GIGABIT_MODE_ENABLE, 10, 1)
    FIELD(NWCFG, PCS_SELECT, 11, 1)
    FIELD(NWCFG, RETRY_TEST, 12, 1)
    FIELD(NWCFG, PAUSE_ENABLE, 13, 1)
    FIELD(NWCFG, RECV_BUF_OFFSET, 14, 2)
    FIELD(NWCFG, LEN_ERR_DISCARD, 16, 1)
    FIELD(NWCFG, FCS_REMOVE, 17, 1)
    FIELD(NWCFG, MDC_CLOCK_DIV, 18, 3)
    FIELD(NWCFG, DATA_BUS_WIDTH, 21, 2)
    FIELD(NWCFG, DISABLE_COPY_PAUSE_FRAMES, 23, 1)
    FIELD(NWCFG, RECV_CSUM_OFFLOAD_EN, 24, 1)
    FIELD(NWCFG, EN_HALF_DUPLEX_RX, 25, 1)
    FIELD(NWCFG, IGNORE_RX_FCS, 26, 1)
    FIELD(NWCFG, SGMII_MODE_ENABLE, 27, 1)
    FIELD(NWCFG, IPG_STRETCH_ENABLE, 28, 1)
    FIELD(NWCFG, NSP_ACCEPT, 29, 1)
    FIELD(NWCFG, IGNORE_IPG_RX_ER, 30, 1)
    FIELD(NWCFG, UNI_DIRECTION_ENABLE, 31, 1)
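
/*
 * A note on the register declarations in this file: REG32(NAME, addr) from
 * "hw/registerfields.h" defines both A_NAME (the byte offset) and R_NAME
 * (the word index, addr / 4) used to index s->regs[]. FIELD(NAME, F, shift,
 * length) defines R_NAME_F_SHIFT/_LENGTH/_MASK, which the FIELD_EX32() and
 * FIELD_DP32() helpers used throughout this model consume. For example,
 * FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC) extracts bit 4 of the
 * Network Config register.
 */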

REG32(NWSTATUS, 0x8) /* Network Status reg */
REG32(USERIO, 0xc) /* User IO reg */

REG32(DMACFG, 0x10) /* DMA Control reg */
    FIELD(DMACFG, SEND_BCAST_TO_ALL_QS, 31, 1)
    FIELD(DMACFG, DMA_ADDR_BUS_WIDTH, 30, 1)
    FIELD(DMACFG, TX_BD_EXT_MODE_EN, 29, 1)
    FIELD(DMACFG, RX_BD_EXT_MODE_EN, 28, 1)
    FIELD(DMACFG, FORCE_MAX_AMBA_BURST_TX, 26, 1)
    FIELD(DMACFG, FORCE_MAX_AMBA_BURST_RX, 25, 1)
    FIELD(DMACFG, FORCE_DISCARD_ON_ERR, 24, 1)
    FIELD(DMACFG, RX_BUF_SIZE, 16, 8)
    FIELD(DMACFG, CRC_ERROR_REPORT, 13, 1)
    FIELD(DMACFG, INF_LAST_DBUF_SIZE_EN, 12, 1)
    FIELD(DMACFG, TX_PBUF_CSUM_OFFLOAD, 11, 1)
    FIELD(DMACFG, TX_PBUF_SIZE, 10, 1)
    FIELD(DMACFG, RX_PBUF_SIZE, 8, 2)
    FIELD(DMACFG, ENDIAN_SWAP_PACKET, 7, 1)
    FIELD(DMACFG, ENDIAN_SWAP_MGNT, 6, 1)
    FIELD(DMACFG, HDR_DATA_SPLIT_EN, 5, 1)
    FIELD(DMACFG, AMBA_BURST_LEN, 0, 5)
#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */

REG32(TXSTATUS, 0x14) /* TX Status reg */
    FIELD(TXSTATUS, TX_USED_BIT_READ_MIDFRAME, 12, 1)
    FIELD(TXSTATUS, TX_FRAME_TOO_LARGE, 11, 1)
    FIELD(TXSTATUS, TX_DMA_LOCKUP, 10, 1)
    FIELD(TXSTATUS, TX_MAC_LOCKUP, 9, 1)
    FIELD(TXSTATUS, RESP_NOT_OK, 8, 1)
    FIELD(TXSTATUS, LATE_COLLISION, 7, 1)
    FIELD(TXSTATUS, TRANSMIT_UNDER_RUN, 6, 1)
    FIELD(TXSTATUS, TRANSMIT_COMPLETE, 5, 1)
    FIELD(TXSTATUS, AMBA_ERROR, 4, 1)
    FIELD(TXSTATUS, TRANSMIT_GO, 3, 1)
    FIELD(TXSTATUS, RETRY_LIMIT, 2, 1)
    FIELD(TXSTATUS, COLLISION, 1, 1)
    FIELD(TXSTATUS, USED_BIT_READ, 0, 1)

REG32(RXQBASE, 0x18) /* RX Q Base address reg */
REG32(TXQBASE, 0x1c) /* TX Q Base address reg */
REG32(RXSTATUS, 0x20) /* RX Status reg */
    FIELD(RXSTATUS, RX_DMA_LOCKUP, 5, 1)
    FIELD(RXSTATUS, RX_MAC_LOCKUP, 4, 1)
    FIELD(RXSTATUS, RESP_NOT_OK, 3, 1)
    FIELD(RXSTATUS, RECEIVE_OVERRUN, 2, 1)
    FIELD(RXSTATUS, FRAME_RECEIVED, 1, 1)
    FIELD(RXSTATUS, BUF_NOT_AVAILABLE, 0, 1)

REG32(ISR, 0x24) /* Interrupt Status reg */
    FIELD(ISR, TX_LOCKUP, 31, 1)
    FIELD(ISR, RX_LOCKUP, 30, 1)
    FIELD(ISR, TSU_TIMER, 29, 1)
    FIELD(ISR, WOL, 28, 1)
    FIELD(ISR, RECV_LPI, 27, 1)
    FIELD(ISR, TSU_SEC_INCR, 26, 1)
    FIELD(ISR, PTP_PDELAY_RESP_XMIT, 25, 1)
    FIELD(ISR, PTP_PDELAY_REQ_XMIT, 24, 1)
    FIELD(ISR, PTP_PDELAY_RESP_RECV, 23, 1)
    FIELD(ISR, PTP_PDELAY_REQ_RECV, 22, 1)
    FIELD(ISR, PTP_SYNC_XMIT, 21, 1)
    FIELD(ISR, PTP_DELAY_REQ_XMIT, 20, 1)
    FIELD(ISR, PTP_SYNC_RECV, 19, 1)
    FIELD(ISR, PTP_DELAY_REQ_RECV, 18, 1)
    FIELD(ISR, PCS_LP_PAGE_RECV, 17, 1)
    FIELD(ISR, PCS_AN_COMPLETE, 16, 1)
    FIELD(ISR, EXT_IRQ, 15, 1)
    FIELD(ISR, PAUSE_FRAME_XMIT, 14, 1)
    FIELD(ISR, PAUSE_TIME_ELAPSED, 13, 1)
    FIELD(ISR, PAUSE_FRAME_RECV, 12, 1)
    FIELD(ISR, RESP_NOT_OK, 11, 1)
    FIELD(ISR, RECV_OVERRUN, 10, 1)
    FIELD(ISR, LINK_CHANGE, 9, 1)
    FIELD(ISR, USXGMII_INT, 8, 1)
    FIELD(ISR, XMIT_COMPLETE, 7, 1)
    FIELD(ISR, AMBA_ERROR, 6, 1)
    FIELD(ISR, RETRY_EXCEEDED, 5, 1)
    FIELD(ISR, XMIT_UNDER_RUN, 4, 1)
    FIELD(ISR, TX_USED, 3, 1)
    FIELD(ISR, RX_USED, 2, 1)
    FIELD(ISR, RECV_COMPLETE, 1, 1)
    FIELD(ISR, MGNT_FRAME_SENT, 0, 1)
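/*
 * Interrupt plumbing: ISR latches events that are not masked by IMR.
 * Writing IER clears mask bits (enabling interrupts) and writing IDR sets
 * them; IMR itself is read-only and only changes via IER/IDR (see the
 * R_IER/R_IDR cases in gem_write()). Queues 1..7 have their own
 * INT_Qn_STATUS/MASK/ENABLE/DISABLE copies of the same scheme.
 */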
REG32(IER, 0x28) /* Interrupt Enable reg */
REG32(IDR, 0x2c) /* Interrupt Disable reg */
REG32(IMR, 0x30) /* Interrupt Mask reg */

REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */
    FIELD(PHYMNTNC, DATA, 0, 16)
    FIELD(PHYMNTNC, REG_ADDR, 18, 5)
    FIELD(PHYMNTNC, PHY_ADDR, 23, 5)
    FIELD(PHYMNTNC, OP, 28, 2)
    FIELD(PHYMNTNC, ST, 30, 2)
#define MDIO_OP_READ 0x2
#define MDIO_OP_WRITE 0x1

REG32(RXPAUSE, 0x38) /* RX Pause Time reg */
REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */
REG32(TXPARTIALSF, 0x40) /* TX Partial Store and Forward */
REG32(RXPARTIALSF, 0x44) /* RX Partial Store and Forward */
REG32(JUMBO_MAX_LEN, 0x48) /* Max Jumbo Frame Size */
REG32(HASHLO, 0x80) /* Hash Low address reg */
REG32(HASHHI, 0x84) /* Hash High address reg */
REG32(SPADDR1LO, 0x88) /* Specific addr 1 low reg */
REG32(SPADDR1HI, 0x8c) /* Specific addr 1 high reg */
REG32(SPADDR2LO, 0x90) /* Specific addr 2 low reg */
REG32(SPADDR2HI, 0x94) /* Specific addr 2 high reg */
REG32(SPADDR3LO, 0x98) /* Specific addr 3 low reg */
REG32(SPADDR3HI, 0x9c) /* Specific addr 3 high reg */
REG32(SPADDR4LO, 0xa0) /* Specific addr 4 low reg */
REG32(SPADDR4HI, 0xa4) /* Specific addr 4 high reg */
REG32(TIDMATCH1, 0xa8) /* Type ID1 Match reg */
REG32(TIDMATCH2, 0xac) /* Type ID2 Match reg */
REG32(TIDMATCH3, 0xb0) /* Type ID3 Match reg */
REG32(TIDMATCH4, 0xb4) /* Type ID4 Match reg */
REG32(WOLAN, 0xb8) /* Wake on LAN reg */
REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */
REG32(SVLAN, 0xc0) /* Stacked VLAN reg */
REG32(MODID, 0xfc) /* Module ID reg */
REG32(OCTTXLO, 0x100) /* Octets transmitted Low reg */
REG32(OCTTXHI, 0x104) /* Octets transmitted High reg */
REG32(TXCNT, 0x108) /* Error-free Frames transmitted */
REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */
REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */
REG32(TXPAUSECNT, 0x114) /* Pause Frames Transmitted */
REG32(TX64CNT, 0x118) /* Error-free 64 TX */
REG32(TX65CNT, 0x11c) /* Error-free 65-127 TX */
REG32(TX128CNT, 0x120) /* Error-free 128-255 TX */
REG32(TX256CNT, 0x124) /* Error-free 256-511 TX */
REG32(TX512CNT, 0x128) /* Error-free 512-1023 TX */
REG32(TX1024CNT, 0x12c) /* Error-free 1024-1518 TX */
REG32(TX1519CNT, 0x130) /* Error-free larger than 1519 TX */
REG32(TXURUNCNT, 0x134) /* TX under run error counter */
REG32(SINGLECOLLCNT, 0x138) /* Single Collision Frames */
REG32(MULTCOLLCNT, 0x13c) /* Multiple Collision Frames */
REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */
REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */
REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */
REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */
REG32(OCTRXLO, 0x150) /* Octets Received register Low */
REG32(OCTRXHI, 0x154) /* Octets Received register High */
REG32(RXCNT, 0x158) /* Error-free Frames Received */
REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */
REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */
REG32(RXPAUSECNT, 0x164) /* Pause Frames Received Counter */
REG32(RX64CNT, 0x168) /* Error-free 64 byte Frames RX */
REG32(RX65CNT, 0x16c) /* Error-free 65-127B Frames RX */
REG32(RX128CNT, 0x170) /* Error-free 128-255B Frames RX */
REG32(RX256CNT, 0x174) /* Error-free 256-512B Frames RX */
REG32(RX512CNT, 0x178) /* Error-free 512-1023B Frames RX */
REG32(RX1024CNT, 0x17c) /* Error-free 1024-1518B Frames RX */
REG32(RX1519CNT, 0x180) /* Error-free 1519-max Frames RX */
REG32(RXUNDERCNT, 0x184) /* Undersize Frames Received */
REG32(RXOVERCNT, 0x188) /* Oversize Frames Received */
REG32(RXJABCNT, 0x18c) /* Jabbers Received Counter */
REG32(RXFCSCNT, 0x190) /* Frame Check seq. Error Counter */
REG32(RXLENERRCNT, 0x194) /* Length Field Error Counter */
REG32(RXSYMERRCNT, 0x198) /* Symbol Error Counter */
REG32(RXALIGNERRCNT, 0x19c) /* Alignment Error Counter */
REG32(RXRSCERRCNT, 0x1a0) /* Receive Resource Error Counter */
REG32(RXORUNCNT, 0x1a4) /* Receive Overrun Counter */
REG32(RXIPCSERRCNT, 0x1a8) /* IP header Checksum Err Counter */
REG32(RXTCPCCNT, 0x1ac) /* TCP Checksum Error Counter */
REG32(RXUDPCCNT, 0x1b0) /* UDP Checksum Error Counter */

REG32(1588S, 0x1d0) /* 1588 Timer Seconds */
REG32(1588NS, 0x1d4) /* 1588 Timer Nanoseconds */
REG32(1588ADJ, 0x1d8) /* 1588 Timer Adjust */
REG32(1588INC, 0x1dc) /* 1588 Timer Increment */
REG32(PTPETXS, 0x1e0) /* PTP Event Frame Transmitted (s) */
REG32(PTPETXNS, 0x1e4) /* PTP Event Frame Transmitted (ns) */
REG32(PTPERXS, 0x1e8) /* PTP Event Frame Received (s) */
REG32(PTPERXNS, 0x1ec) /* PTP Event Frame Received (ns) */
REG32(PTPPTXS, 0x1f0) /* PTP Peer Frame Transmitted (s) */
REG32(PTPPTXNS, 0x1f4) /* PTP Peer Frame Transmitted (ns) */
REG32(PTPPRXS, 0x1f8) /* PTP Peer Frame Received (s) */
REG32(PTPPRXNS, 0x1fc) /* PTP Peer Frame Received (ns) */

/* Design Configuration Registers */
REG32(DESCONF, 0x280)
REG32(DESCONF2, 0x284)
REG32(DESCONF3, 0x288)
REG32(DESCONF4, 0x28c)
REG32(DESCONF5, 0x290)
REG32(DESCONF6, 0x294)
    FIELD(DESCONF6, DMA_ADDR_64B, 23, 1)
REG32(DESCONF7, 0x298)

REG32(INT_Q1_STATUS, 0x400)
REG32(INT_Q1_MASK, 0x640)

REG32(TRANSMIT_Q1_PTR, 0x440)
REG32(TRANSMIT_Q7_PTR, 0x458)

REG32(RECEIVE_Q1_PTR, 0x480)
REG32(RECEIVE_Q7_PTR, 0x498)

REG32(TBQPH, 0x4c8)
REG32(RBQPH, 0x4d4)

REG32(INT_Q1_ENABLE, 0x600)
REG32(INT_Q7_ENABLE, 0x618)

REG32(INT_Q1_DISABLE, 0x620)
REG32(INT_Q7_DISABLE, 0x638)

REG32(SCREENING_TYPE1_REG0, 0x500)
    FIELD(SCREENING_TYPE1_REG0, QUEUE_NUM, 0, 4)
    FIELD(SCREENING_TYPE1_REG0, DSTC_MATCH, 4, 8)
    FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH, 12, 16)
    FIELD(SCREENING_TYPE1_REG0, DSTC_ENABLE, 28, 1)
    FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN, 29, 1)
    FIELD(SCREENING_TYPE1_REG0, DROP_ON_MATCH, 30, 1)

REG32(SCREENING_TYPE2_REG0, 0x540)
    FIELD(SCREENING_TYPE2_REG0, QUEUE_NUM, 0, 4)
    FIELD(SCREENING_TYPE2_REG0, VLAN_PRIORITY, 4, 3)
    FIELD(SCREENING_TYPE2_REG0, VLAN_ENABLE, 8, 1)
    FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_REG_INDEX, 9, 3)
    FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE, 12, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_A, 13, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_A_ENABLE, 18, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_B, 19, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_B_ENABLE, 24, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_C, 25, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_C_ENABLE, 30, 1)
    FIELD(SCREENING_TYPE2_REG0, DROP_ON_MATCH, 31, 1)

REG32(SCREENING_TYPE2_ETHERTYPE_REG0, 0x6e0)

REG32(TYPE2_COMPARE_0_WORD_0, 0x700)
    FIELD(TYPE2_COMPARE_0_WORD_0, MASK_VALUE, 0, 16)
    FIELD(TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE, 16, 16)

REG32(TYPE2_COMPARE_0_WORD_1, 0x704)
    FIELD(TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE, 0, 7)
    FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET, 7, 2)
    FIELD(TYPE2_COMPARE_0_WORD_1, DISABLE_MASK, 9, 1)
    FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_VLAN_ID, 10, 1)
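
/*
 * Example (illustrative values, not taken from any particular board): a
 * guest that wants PTP event traffic steered to priority queue 1 could
 * program type 1 screener 0 as
 *
 *     SCREENING_TYPE1_REG0 = QUEUE_NUM(1) | UDP_PORT_MATCH(319) |
 *                            UDP_PORT_MATCH_EN(1)
 *
 * get_queue_from_screen() below then returns 1 for any received frame
 * whose UDP destination port is 319.
 */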

/*****************************************/


/* Marvell PHY definitions */
#define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */

#define PHY_REG_CONTROL 0
#define PHY_REG_STATUS 1
#define PHY_REG_PHYID1 2
#define PHY_REG_PHYID2 3
#define PHY_REG_ANEGADV 4
#define PHY_REG_LINKPABIL 5
#define PHY_REG_ANEGEXP 6
#define PHY_REG_NEXTP 7
#define PHY_REG_LINKPNEXTP 8
#define PHY_REG_100BTCTRL 9
#define PHY_REG_1000BTSTAT 10
#define PHY_REG_EXTSTAT 15
#define PHY_REG_PHYSPCFC_CTL 16
#define PHY_REG_PHYSPCFC_ST 17
#define PHY_REG_INT_EN 18
#define PHY_REG_INT_ST 19
#define PHY_REG_EXT_PHYSPCFC_CTL 20
#define PHY_REG_RXERR 21
#define PHY_REG_EACD 22
#define PHY_REG_LED 24
#define PHY_REG_LED_OVRD 25
#define PHY_REG_EXT_PHYSPCFC_CTL2 26
#define PHY_REG_EXT_PHYSPCFC_ST 27
#define PHY_REG_CABLE_DIAG 28

#define PHY_REG_CONTROL_RST 0x8000
#define PHY_REG_CONTROL_LOOP 0x4000
#define PHY_REG_CONTROL_ANEG 0x1000
#define PHY_REG_CONTROL_ANRESTART 0x0200

#define PHY_REG_STATUS_LINK 0x0004
#define PHY_REG_STATUS_ANEGCMPL 0x0020

#define PHY_REG_INT_ST_ANEGCMPL 0x0800
#define PHY_REG_INT_ST_LINKC 0x0400
#define PHY_REG_INT_ST_ENERGY 0x0010

/***********************************************************************/
#define GEM_RX_REJECT (-1)
#define GEM_RX_PROMISCUOUS_ACCEPT (-2)
#define GEM_RX_BROADCAST_ACCEPT (-3)
#define GEM_RX_MULTICAST_HASH_ACCEPT (-4)
#define GEM_RX_UNICAST_HASH_ACCEPT (-5)

#define GEM_RX_SAR_ACCEPT 0

/***********************************************************************/

#define DESC_1_USED 0x80000000
#define DESC_1_LENGTH 0x00001FFF

#define DESC_1_TX_WRAP 0x40000000
#define DESC_1_TX_LAST 0x00008000

#define DESC_0_RX_WRAP 0x00000002
#define DESC_0_RX_OWNERSHIP 0x00000001

#define R_DESC_1_RX_SAR_SHIFT 25
#define R_DESC_1_RX_SAR_LENGTH 2
#define R_DESC_1_RX_SAR_MATCH (1 << 27)
#define R_DESC_1_RX_UNICAST_HASH (1 << 29)
#define R_DESC_1_RX_MULTICAST_HASH (1 << 30)
#define R_DESC_1_RX_BROADCAST (1 << 31)

#define DESC_1_RX_SOF 0x00004000
#define DESC_1_RX_EOF 0x00008000

#define GEM_MODID_VALUE 0x00020118
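
/*
 * Buffer descriptor layout, as used by the accessors below: word 0 holds
 * the buffer address (and, for RX, the WRAP and OWNERSHIP bits in its two
 * low bits, which rx_desc_get_buffer() masks off); word 1 holds control
 * and status: USED, WRAP and LAST for TX, and SOF/EOF, the 13-bit frame
 * LENGTH and the address-match flags for RX. When the DMA address bus is
 * 64 bits wide, word 2 carries the upper half of the buffer address.
 */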

static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
    uint64_t ret = desc[0];

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret |= (uint64_t)desc[2] << 32;
    }
    return ret;
}

static inline unsigned tx_desc_get_used(uint32_t *desc)
{
    return (desc[1] & DESC_1_USED) ? 1 : 0;
}

static inline void tx_desc_set_used(uint32_t *desc)
{
    desc[1] |= DESC_1_USED;
}

static inline unsigned tx_desc_get_wrap(uint32_t *desc)
{
    return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0;
}

static inline unsigned tx_desc_get_last(uint32_t *desc)
{
    return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
}

static inline unsigned tx_desc_get_length(uint32_t *desc)
{
    return desc[1] & DESC_1_LENGTH;
}

static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue)
{
    DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
    DB_PRINT("bufaddr: 0x%08x\n", *desc);
    DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
    DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
    DB_PRINT("last: %d\n", tx_desc_get_last(desc));
    DB_PRINT("length: %d\n", tx_desc_get_length(desc));
}

static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
    uint64_t ret = desc[0] & ~0x3UL;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret |= (uint64_t)desc[2] << 32;
    }
    return ret;
}

static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx)
{
    int ret = 2;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret += 2;
    }
    if (s->regs[R_DMACFG] & (rx_n_tx ? R_DMACFG_RX_BD_EXT_MODE_EN_MASK
                                     : R_DMACFG_TX_BD_EXT_MODE_EN_MASK)) {
        ret += 2;
    }

    assert(ret <= DESC_MAX_NUM_WORDS);
    return ret;
}

static inline unsigned rx_desc_get_wrap(uint32_t *desc)
{
    return desc[0] & DESC_0_RX_WRAP ? 1 : 0;
}

static inline unsigned rx_desc_get_ownership(uint32_t *desc)
{
    return desc[0] & DESC_0_RX_OWNERSHIP ? 1 : 0;
}

static inline void rx_desc_set_ownership(uint32_t *desc)
{
    desc[0] |= DESC_0_RX_OWNERSHIP;
}

static inline void rx_desc_set_sof(uint32_t *desc)
{
    desc[1] |= DESC_1_RX_SOF;
}

static inline void rx_desc_clear_control(uint32_t *desc)
{
    desc[1] = 0;
}

static inline void rx_desc_set_eof(uint32_t *desc)
{
    desc[1] |= DESC_1_RX_EOF;
}

static inline void rx_desc_set_length(uint32_t *desc, unsigned len)
{
    desc[1] &= ~DESC_1_LENGTH;
    desc[1] |= len;
}

static inline void rx_desc_set_broadcast(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_BROADCAST;
}

static inline void rx_desc_set_unicast_hash(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_UNICAST_HASH;
}

static inline void rx_desc_set_multicast_hash(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_MULTICAST_HASH;
}

static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx)
{
    desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH,
                        sar_idx);
    desc[1] |= R_DESC_1_RX_SAR_MATCH;
}

/* The broadcast MAC address: 0xFFFFFFFFFFFF */
static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
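
/*
 * Frame size limits implemented by gem_get_max_buf_len() below: 1518 bytes
 * by default, 1538 on the receive side when NWCFG.RECV_1536_BYTE_FRAMES is
 * set, or the JUMBO_MAX_LEN register value (clamped to the jumbo-max-len
 * property) when jumbo frames are enabled.
 */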
static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx)
{
    uint32_t size;
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, JUMBO_FRAMES)) {
        size = s->regs[R_JUMBO_MAX_LEN];
        if (size > s->jumbo_max_len) {
            size = s->jumbo_max_len;
            qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be"
                          " greater than 0x%" PRIx32 "\n", s->jumbo_max_len);
        }
    } else if (tx) {
        size = 1518;
    } else {
        size = FIELD_EX32(s->regs[R_NWCFG],
                          NWCFG, RECV_1536_BYTE_FRAMES) ? 1538 : 1518;
    }
    return size;
}

static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag)
{
    if (q == 0) {
        s->regs[R_ISR] |= flag & ~(s->regs[R_IMR]);
    } else {
        s->regs[R_INT_Q1_STATUS + q - 1] |= flag &
                                            ~(s->regs[R_INT_Q1_MASK + q - 1]);
    }
}

/*
 * gem_init_register_masks:
 * One time initialization.
 * Set masks to identify which register bits have magical clear properties
 */
static void gem_init_register_masks(CadenceGEMState *s)
{
    unsigned int i;
    /* Mask of register bits which are read only */
    memset(&s->regs_ro[0], 0, sizeof(s->regs_ro));
    s->regs_ro[R_NWCTRL]   = 0xFFF80000;
    s->regs_ro[R_NWSTATUS] = 0xFFFFFFFF;
    s->regs_ro[R_DMACFG]   = 0x8E00F000;
    s->regs_ro[R_TXSTATUS] = 0xFFFFFE08;
    s->regs_ro[R_RXQBASE]  = 0x00000003;
    s->regs_ro[R_TXQBASE]  = 0x00000003;
    s->regs_ro[R_RXSTATUS] = 0xFFFFFFF0;
    s->regs_ro[R_ISR]      = 0xFFFFFFFF;
    s->regs_ro[R_IMR]      = 0xFFFFFFFF;
    s->regs_ro[R_MODID]    = 0xFFFFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_ro[R_INT_Q1_STATUS + i] = 0xFFFFFFFF;
        s->regs_ro[R_INT_Q1_ENABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_DISABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_MASK + i] = 0xFFFFFFFF;
    }

    /* Mask of register bits which are clear on read */
    memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc));
    s->regs_rtc[R_ISR] = 0xFFFFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_rtc[R_INT_Q1_STATUS + i] = 0x00000CE6;
    }

    /* Mask of register bits which are write 1 to clear */
    memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c));
    s->regs_w1c[R_TXSTATUS] = 0x000001F7;
    s->regs_w1c[R_RXSTATUS] = 0x0000000F;

    /* Mask of register bits which are write only */
    memset(&s->regs_wo[0], 0, sizeof(s->regs_wo));
    s->regs_wo[R_NWCTRL] = 0x00073E60;
    s->regs_wo[R_IER]    = 0x07FFFFFF;
    s->regs_wo[R_IDR]    = 0x07FFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_wo[R_INT_Q1_ENABLE + i] = 0x00000CE6;
        s->regs_wo[R_INT_Q1_DISABLE + i] = 0x00000CE6;
    }
}
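
/*
 * These masks drive gem_read()/gem_write(): read-only bits are squashed
 * from incoming write values, write-1-to-clear bits are cleared when
 * written as 1, read-to-clear registers are zeroed by the read itself, and
 * write-only bits read back as 0. For example, writing 0x00000002 to
 * TXSTATUS clears just its COLLISION bit.
 */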

/*
 * phy_update_link:
 * Make the emulated PHY link state match the QEMU "interface" state.
 */
static void phy_update_link(CadenceGEMState *s)
{
    DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down);

    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL |
                                         PHY_REG_STATUS_LINK);
        s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC;
    } else {
        s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL |
                                        PHY_REG_STATUS_LINK);
        s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC |
                                        PHY_REG_INT_ST_ANEGCMPL |
                                        PHY_REG_INT_ST_ENERGY);
    }
}

static bool gem_can_receive(NetClientState *nc)
{
    CadenceGEMState *s;
    int i;

    s = qemu_get_nic_opaque(nc);

    /* Do nothing if receive is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_RECEIVE)) {
        if (s->can_rx_state != 1) {
            s->can_rx_state = 1;
            DB_PRINT("can't receive - no enable\n");
        }
        return false;
    }

    for (i = 0; i < s->num_priority_queues; i++) {
        if (rx_desc_get_ownership(s->rx_desc[i]) != 1) {
            break;
        }
    }

    if (i == s->num_priority_queues) {
        if (s->can_rx_state != 2) {
            s->can_rx_state = 2;
            DB_PRINT("can't receive - all the buffer descriptors are busy\n");
        }
        return false;
    }

    if (s->can_rx_state != 0) {
        s->can_rx_state = 0;
        DB_PRINT("can receive\n");
    }
    return true;
}

/*
 * gem_update_int_status:
 * Raise or lower interrupt based on current status.
 */
static void gem_update_int_status(CadenceGEMState *s)
{
    int i;

    qemu_set_irq(s->irq[0], !!s->regs[R_ISR]);

    for (i = 1; i < s->num_priority_queues; ++i) {
        qemu_set_irq(s->irq[i], !!s->regs[R_INT_Q1_STATUS + i - 1]);
    }
}

/*
 * gem_receive_updatestats:
 * Increment receive statistics.
 */
static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                    unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) received */
    octets = ((uint64_t)(s->regs[R_OCTRXLO]) << 32) |
             s->regs[R_OCTRXHI];
    octets += bytes;
    s->regs[R_OCTRXLO] = octets >> 32;
    s->regs[R_OCTRXHI] = octets;

    /* Error-free Frames received */
    s->regs[R_RXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_RXBROADCNT]++;
    }

    /* Error-free Multicast Frames counter */
    if (packet[0] == 0x01) {
        s->regs[R_RXMULTICNT]++;
    }

    if (bytes <= 64) {
        s->regs[R_RX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_RX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_RX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_RX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_RX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_RX1024CNT]++;
    } else {
        s->regs[R_RX1519CNT]++;
    }
}

/*
 * Get the MAC Address bit from the specified position
 */
static unsigned get_bit(const uint8_t *mac, unsigned bit)
{
    unsigned byte;

    byte = mac[bit / 8];
    byte >>= (bit & 0x7);
    byte &= 1;

    return byte;
}

/*
 * Calculate a GEM MAC Address hash index
 */
static unsigned calc_mac_hash(const uint8_t *mac)
{
    int index_bit, mac_bit;
    unsigned hash_index;

    hash_index = 0;
    mac_bit = 5;
    for (index_bit = 5; index_bit >= 0; index_bit--) {
        hash_index |= (get_bit(mac, mac_bit) ^
                       get_bit(mac, mac_bit + 6) ^
                       get_bit(mac, mac_bit + 12) ^
                       get_bit(mac, mac_bit + 18) ^
                       get_bit(mac, mac_bit + 24) ^
                       get_bit(mac, mac_bit + 30) ^
                       get_bit(mac, mac_bit + 36) ^
                       get_bit(mac, mac_bit + 42)) << index_bit;
        mac_bit--;
    }

    return hash_index;
}
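
/*
 * In other words, each of the six index bits is the XOR of every sixth bit
 * of the 48-bit destination address: hash bit n = da[n] ^ da[n + 6] ^
 * da[n + 12] ^ ... ^ da[n + 42], where da[0] is the least significant bit
 * of the first octet. A frame is accepted when the computed index selects
 * a 1 bit in the 64-bit HASHHI:HASHLO bucket register pair, as checked in
 * gem_mac_address_filter() below.
 */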

/*
 * gem_mac_address_filter:
 * Accept or reject this destination address?
 * Returns:
 * GEM_RX_REJECT: reject
 * >= 0: Specific address accept (which matched SAR is returned)
 * others for various other modes of accept:
 * GEM_RX_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT,
 * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT
 */
static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
{
    uint8_t *gem_spaddr;
    int i, is_mc;

    /* Promiscuous mode? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC)) {
        return GEM_RX_PROMISCUOUS_ACCEPT;
    }

    if (!memcmp(packet, broadcast_addr, 6)) {
        /* Reject broadcast packets? */
        if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, NO_BROADCAST)) {
            return GEM_RX_REJECT;
        }
        return GEM_RX_BROADCAST_ACCEPT;
    }

    /* Accept packets with a hash match? */
    is_mc = is_multicast_ether_addr(packet);
    if ((is_mc && (FIELD_EX32(s->regs[R_NWCFG], NWCFG, MULTICAST_HASH_EN))) ||
        (!is_mc && FIELD_EX32(s->regs[R_NWCFG], NWCFG, UNICAST_HASH_EN))) {
        uint64_t buckets;
        unsigned hash_index;

        hash_index = calc_mac_hash(packet);
        buckets = ((uint64_t)s->regs[R_HASHHI] << 32) | s->regs[R_HASHLO];
        if ((buckets >> hash_index) & 1) {
            return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT
                         : GEM_RX_UNICAST_HASH_ACCEPT;
        }
    }

    /* Check all 4 specific addresses */
    gem_spaddr = (uint8_t *)&(s->regs[R_SPADDR1LO]);
    for (i = 3; i >= 0; i--) {
        if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) {
            return GEM_RX_SAR_ACCEPT + i;
        }
    }

    /* No address match; reject the packet */
    return GEM_RX_REJECT;
}
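
/*
 * Note the precedence implemented above: promiscuous mode accepts
 * everything; otherwise broadcast handling comes first, then the hash
 * filters, and the four specific-address registers are consulted last,
 * highest-numbered SAR first.
 */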

/* Figure out which queue the received data should be sent to */
static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
                                 unsigned rxbufsize)
{
    uint32_t reg;
    bool matched, mismatched;
    int i, j;

    for (i = 0; i < s->num_type1_screeners; i++) {
        reg = s->regs[R_SCREENING_TYPE1_REG0 + i];
        matched = false;
        mismatched = false;

        /* Screening is based on UDP Port */
        if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN)) {
            uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
            if (udp_port == FIELD_EX32(reg, SCREENING_TYPE1_REG0,
                                       UDP_PORT_MATCH)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        /* Screening is based on DS/TC */
        if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_ENABLE)) {
            uint8_t dscp = rxbuf_ptr[14 + 1];
            if (dscp == FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_MATCH)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        if (matched && !mismatched) {
            return FIELD_EX32(reg, SCREENING_TYPE1_REG0, QUEUE_NUM);
        }
    }

    for (i = 0; i < s->num_type2_screeners; i++) {
        reg = s->regs[R_SCREENING_TYPE2_REG0 + i];
        matched = false;
        mismatched = false;

        if (FIELD_EX32(reg, SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE)) {
            uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
            int et_idx = FIELD_EX32(reg, SCREENING_TYPE2_REG0,
                                    ETHERTYPE_REG_INDEX);

            if (et_idx > s->num_type2_screeners) {
                qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
                              "register index: %d\n", et_idx);
            }
            if (type == s->regs[R_SCREENING_TYPE2_ETHERTYPE_REG0 +
                                et_idx]) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        /* Compare A, B, C */
        for (j = 0; j < 3; j++) {
            uint32_t cr0, cr1, mask, compare;
            uint16_t rx_cmp;
            int offset;
            int cr_idx = extract32(reg,
                                   R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT
                                       + j * 6,
                                   R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH);

            if (!extract32(reg,
                           R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_SHIFT
                               + j * 6,
                           R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_LENGTH)) {
                continue;
            }

            if (cr_idx > s->num_type2_screeners) {
                qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
                              "register index: %d\n", cr_idx);
            }

            cr0 = s->regs[R_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
            cr1 = s->regs[R_TYPE2_COMPARE_0_WORD_1 + cr_idx * 2];
            offset = FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE);

            switch (FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET)) {
            case 3: /* Skip UDP header */
                qemu_log_mask(LOG_UNIMP, "TCP compare offsets "
                              "unimplemented - assuming UDP\n");
                offset += 8;
                /* Fallthrough */
            case 2: /* skip the IP header */
                offset += 20;
                /* Fallthrough */
            case 1: /* Count from after the ethertype */
                offset += 14;
                break;
            case 0:
                /* Offset from start of frame */
                break;
            }

            /* Big-endian 16-bit value at the configured offset */
            rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset + 1];
            mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE);
            compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE);

            if ((rx_cmp & mask) == (compare & mask)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        if (matched && !mismatched) {
            return FIELD_EX32(reg, SCREENING_TYPE2_REG0, QUEUE_NUM);
        }
    }

    /* We made it here, assume it's queue 0 */
    return 0;
}

static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q)
{
    uint32_t base_addr = 0;

    switch (q) {
    case 0:
        base_addr = s->regs[tx ? R_TXQBASE : R_RXQBASE];
        break;
    case 1 ... (MAX_PRIORITY_QUEUES - 1):
        base_addr = s->regs[(tx ? R_TRANSMIT_Q1_PTR :
                                  R_RECEIVE_Q1_PTR) + q - 1];
        break;
    default:
        g_assert_not_reached();
    }

    return base_addr;
}

static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, true, q);
}

static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, false, q);
}

static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q)
{
    hwaddr desc_addr = 0;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        desc_addr = s->regs[tx ? R_TBQPH : R_RBQPH];
    }
    desc_addr <<= 32;
    desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q];
    return desc_addr;
}

static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, true, q);
}

static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, false, q);
}
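
/*
 * Descriptor fetch addresses combine TBQPH/RBQPH (the upper 32 bits, only
 * when 64-bit DMA addressing is enabled in DMACFG) with the per-queue
 * lower 32-bit pointers kept in tx_desc_addr[]/rx_desc_addr[], which the
 * model advances as it walks the rings.
 */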

static void gem_get_rx_desc(CadenceGEMState *s, int q)
{
    hwaddr desc_addr = gem_get_rx_desc_addr(s, q);

    DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr);

    /* read current descriptor */
    address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                       s->rx_desc[q],
                       sizeof(uint32_t) * gem_get_desc_len(s, true));

    /* Descriptor owned by software ? */
    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
        DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
        s->regs[R_RXSTATUS] |= R_RXSTATUS_BUF_NOT_AVAILABLE_MASK;
        gem_set_isr(s, q, R_ISR_RX_USED_MASK);
        /* Handle interrupt consequences */
        gem_update_int_status(s);
    }
}

/*
 * gem_receive:
 * Fit a packet handed to us by QEMU into the receive descriptor ring.
 */
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);
    unsigned rxbufsize, bytes_to_copy;
    unsigned rxbuf_offset;
    uint8_t *rxbuf_ptr;
    bool first_desc = true;
    int maf;
    int q = 0;

    /* Is this destination MAC address "for us" ? */
    maf = gem_mac_address_filter(s, buf);
    if (maf == GEM_RX_REJECT) {
        return size; /* no, drop silently b/c it's not an error */
    }

    /* Discard packets with receive length error enabled ? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, LEN_ERR_DISCARD)) {
        unsigned type_len;

        /* Fish the ethertype / length field out of the RX packet */
        type_len = buf[12] << 8 | buf[13];
        /* It is a length field, not an ethertype */
        if (type_len < 0x600) {
            if (size < type_len) {
                /* discard */
                return -1;
            }
        }
    }

    /*
     * Determine configured receive buffer offset (probably 0)
     */
    rxbuf_offset = FIELD_EX32(s->regs[R_NWCFG], NWCFG, RECV_BUF_OFFSET);

    /* The configured size of each receive buffer determines how many
     * buffers are needed to hold this packet.
     */
    rxbufsize = FIELD_EX32(s->regs[R_DMACFG], DMACFG, RX_BUF_SIZE);
    rxbufsize *= GEM_DMACFG_RBUFSZ_MUL;

    bytes_to_copy = size;

    /* Hardware allows a zero value here but warns against it. To avoid QEMU
     * indefinite loops we enforce a minimum value here
     */
    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
    }

    /* Pad to minimum length. Assume the FCS field is stripped; the logic
     * below raises this to the real minimum of 64 when not stripping
     * the FCS.
     */
    if (size < 60) {
        size = 60;
    }

    /* Strip off the FCS field ? (usually yes) */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, FCS_REMOVE)) {
        rxbuf_ptr = (void *)buf;
    } else {
        uint32_t crc_val;

        if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
            size = MAX_FRAME_SIZE - sizeof(crc_val);
        }
        bytes_to_copy = size;
        /* The application wants the FCS field, which QEMU does not provide.
         * We must try and calculate one.
         */

        memcpy(s->rx_packet, buf, size);
        memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
        rxbuf_ptr = s->rx_packet;
        crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
        memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));

        bytes_to_copy += 4;
        size += 4;
    }

    DB_PRINT("config bufsize: %u packet size: %zu\n", rxbufsize, size);

    /* Find which queue we are targeting */
    q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);

    if (size > gem_get_max_buf_len(s, false)) {
        qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
        gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK);
        return -1;
    }

    while (bytes_to_copy) {
        hwaddr desc_addr;

        /* Do nothing if receive is not enabled. */
        if (!gem_can_receive(nc)) {
            return -1;
        }

        DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
                 MIN(bytes_to_copy, rxbufsize),
                 rx_desc_get_buffer(s, s->rx_desc[q]));

        /* Copy packet data to emulated DMA buffer */
        address_space_write(&s->dma_as,
                            rx_desc_get_buffer(s, s->rx_desc[q]) +
                                rxbuf_offset,
                            MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
                            MIN(bytes_to_copy, rxbufsize));
        rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
        bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);

        rx_desc_clear_control(s->rx_desc[q]);

        /* Update the descriptor. */
        if (first_desc) {
            rx_desc_set_sof(s->rx_desc[q]);
            first_desc = false;
        }
        if (bytes_to_copy == 0) {
            rx_desc_set_eof(s->rx_desc[q]);
            rx_desc_set_length(s->rx_desc[q], size);
        }
        rx_desc_set_ownership(s->rx_desc[q]);

        switch (maf) {
        case GEM_RX_PROMISCUOUS_ACCEPT:
            break;
        case GEM_RX_BROADCAST_ACCEPT:
            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
        case GEM_RX_UNICAST_HASH_ACCEPT:
            rx_desc_set_unicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_MULTICAST_HASH_ACCEPT:
            rx_desc_set_multicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_REJECT:
            abort();
        default: /* SAR */
            rx_desc_set_sar(s->rx_desc[q], maf);
        }

        /* Descriptor write-back. */
        desc_addr = gem_get_rx_desc_addr(s, q);
        address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            s->rx_desc[q],
                            sizeof(uint32_t) * gem_get_desc_len(s, true));

        /* Next descriptor */
        if (rx_desc_get_wrap(s->rx_desc[q])) {
            DB_PRINT("wrapping RX descriptor list\n");
            s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
        } else {
            DB_PRINT("incrementing RX descriptor list\n");
            s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
        }

        gem_get_rx_desc(s, q);
    }

    /* Count it */
    gem_receive_updatestats(s, buf, size);

    s->regs[R_RXSTATUS] |= R_RXSTATUS_FRAME_RECEIVED_MASK;
    gem_set_isr(s, q, R_ISR_RECV_COMPLETE_MASK);

    /* Handle interrupt consequences */
    gem_update_int_status(s);

    return size;
}

/*
 * gem_transmit_updatestats:
 * Increment transmit statistics.
 */
static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                     unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) transmitted */
    octets = ((uint64_t)(s->regs[R_OCTTXLO]) << 32) |
             s->regs[R_OCTTXHI];
    octets += bytes;
    s->regs[R_OCTTXLO] = octets >> 32;
    s->regs[R_OCTTXHI] = octets;

    /* Error-free Frames transmitted */
    s->regs[R_TXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_TXBCNT]++;
    }

    /* Error-free Multicast Frames counter */
    if (packet[0] == 0x01) {
        s->regs[R_TXMCNT]++;
    }

    if (bytes <= 64) {
        s->regs[R_TX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_TX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_TX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_TX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_TX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_TX1024CNT]++;
    } else {
        s->regs[R_TX1519CNT]++;
    }
}

/*
 * gem_transmit:
 * Fish packets out of the descriptor ring and feed them to QEMU
 */
static void gem_transmit(CadenceGEMState *s)
{
    uint32_t desc[DESC_MAX_NUM_WORDS];
    hwaddr packet_desc_addr;
    uint8_t *p;
    unsigned total_bytes;
    int q = 0;

    /* Do nothing if transmit is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
        return;
    }

    DB_PRINT("\n");

    /* The packet we will hand off to QEMU.
     * Packets scattered across multiple descriptors are gathered to this
     * one contiguous buffer first.
     */
    p = s->tx_packet;
    total_bytes = 0;

    for (q = s->num_priority_queues - 1; q >= 0; q--) {
        /* read current descriptor */
        packet_desc_addr = gem_get_tx_desc_addr(s, q);

        DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
        address_space_read(&s->dma_as, packet_desc_addr,
                           MEMTXATTRS_UNSPECIFIED, desc,
                           sizeof(uint32_t) * gem_get_desc_len(s, false));
        /* Handle all descriptors owned by hardware */
        while (tx_desc_get_used(desc) == 0) {

            /* Do nothing if transmit is not enabled. */
            if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
                return;
            }
            print_gem_tx_desc(desc, q);

            /* The real hardware would eat this (and possibly crash).
             * For QEMU let's lend a helping hand.
             */
            if ((tx_desc_get_buffer(s, desc) == 0) ||
                (tx_desc_get_length(desc) == 0)) {
                DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
                         packet_desc_addr);
                break;
            }

            if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
                                               (p - s->tx_packet)) {
                qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%"
                              HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
                              packet_desc_addr, tx_desc_get_length(desc),
                              gem_get_max_buf_len(s, true) -
                                  (p - s->tx_packet));
                gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK);
                break;
            }

            /* Gather this fragment of the packet from "dma memory" to our
             * contig buffer.
             */
            address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
                               MEMTXATTRS_UNSPECIFIED,
                               p, tx_desc_get_length(desc));
            p += tx_desc_get_length(desc);
            total_bytes += tx_desc_get_length(desc);

            /* Last descriptor for this packet; hand the whole thing off */
            if (tx_desc_get_last(desc)) {
                uint32_t desc_first[DESC_MAX_NUM_WORDS];
                hwaddr desc_addr = gem_get_tx_desc_addr(s, q);

                /* Modify the 1st descriptor of this packet to be owned by
                 * the processor.
                 */
                address_space_read(&s->dma_as, desc_addr,
                                   MEMTXATTRS_UNSPECIFIED, desc_first,
                                   sizeof(desc_first));
                tx_desc_set_used(desc_first);
                address_space_write(&s->dma_as, desc_addr,
                                    MEMTXATTRS_UNSPECIFIED, desc_first,
                                    sizeof(desc_first));
                /* Advance the hardware current descriptor past this packet */
                if (tx_desc_get_wrap(desc)) {
                    s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
                } else {
                    s->tx_desc_addr[q] = packet_desc_addr +
                                         4 * gem_get_desc_len(s, false);
                }
                DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);

                s->regs[R_TXSTATUS] |= R_TXSTATUS_TRANSMIT_COMPLETE_MASK;
                gem_set_isr(s, q, R_ISR_XMIT_COMPLETE_MASK);

                /* Handle interrupt consequences */
                gem_update_int_status(s);

                /* Is checksum offload enabled? */
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG,
                               TX_PBUF_CSUM_OFFLOAD)) {
                    net_checksum_calculate(s->tx_packet, total_bytes,
                                           CSUM_ALL);
                }

                /* Update MAC statistics */
                gem_transmit_updatestats(s, s->tx_packet, total_bytes);

                /* Send the packet somewhere */
                if (s->phy_loop || FIELD_EX32(s->regs[R_NWCTRL], NWCTRL,
                                              LOOPBACK_LOCAL)) {
                    qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
                                        total_bytes);
                } else {
                    qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
                                     total_bytes);
                }

                /* Prepare for next packet */
                p = s->tx_packet;
                total_bytes = 0;
            }

            /* read next descriptor */
            if (tx_desc_get_wrap(desc)) {
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG,
                               DMA_ADDR_BUS_WIDTH)) {
                    packet_desc_addr = s->regs[R_TBQPH];
                    packet_desc_addr <<= 32;
                } else {
                    packet_desc_addr = 0;
                }
                packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
            } else {
                packet_desc_addr += 4 * gem_get_desc_len(s, false);
            }
            DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n",
                     packet_desc_addr);
            address_space_read(&s->dma_as, packet_desc_addr,
                               MEMTXATTRS_UNSPECIFIED, desc,
                               sizeof(uint32_t) * gem_get_desc_len(s, false));
        }

        if (tx_desc_get_used(desc)) {
            s->regs[R_TXSTATUS] |= R_TXSTATUS_USED_BIT_READ_MASK;
            /* IRQ TXUSED is defined only for queue 0 */
            if (q == 0) {
                gem_set_isr(s, 0, R_ISR_TX_USED_MASK);
            }
            gem_update_int_status(s);
        }
    }
}
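
/*
 * The PHY model below identifies itself with the ID registers of a Marvell
 * gigabit PHY (PHYID1/PHYID2 = 0x0141/0x0CC2) and resets with
 * autonegotiation already complete; phy_update_link() then reports link up
 * whenever the QEMU-side link is up, so guests see a ready 1000BASE-T link
 * as soon as they probe the MDIO bus.
 */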
static void gem_phy_reset(CadenceGEMState *s)
{
    memset(&s->phy_regs[0], 0, sizeof(s->phy_regs));
    s->phy_regs[PHY_REG_CONTROL] = 0x1140;
    s->phy_regs[PHY_REG_STATUS] = 0x7969;
    s->phy_regs[PHY_REG_PHYID1] = 0x0141;
    s->phy_regs[PHY_REG_PHYID2] = 0x0CC2;
    s->phy_regs[PHY_REG_ANEGADV] = 0x01E1;
    s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1;
    s->phy_regs[PHY_REG_ANEGEXP] = 0x000F;
    s->phy_regs[PHY_REG_NEXTP] = 0x2001;
    s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6;
    s->phy_regs[PHY_REG_100BTCTRL] = 0x0300;
    s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00;
    s->phy_regs[PHY_REG_EXTSTAT] = 0x3000;
    s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078;
    s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60;
    s->phy_regs[PHY_REG_LED] = 0x4100;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B;

    phy_update_link(s);
}

static void gem_reset(DeviceState *d)
{
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

    /* Set post reset register values */
    memset(&s->regs[0], 0, sizeof(s->regs));
    s->regs[R_NWCFG] = 0x00080000;
    s->regs[R_NWSTATUS] = 0x00000006;
    s->regs[R_DMACFG] = 0x00020784;
    s->regs[R_IMR] = 0x07ffffff;
    s->regs[R_TXPAUSE] = 0x0000ffff;
    s->regs[R_TXPARTIALSF] = 0x000003ff;
    s->regs[R_RXPARTIALSF] = 0x000003ff;
    s->regs[R_MODID] = s->revision;
    s->regs[R_DESCONF] = 0x02D00111;
    s->regs[R_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
    s->regs[R_DESCONF5] = 0x002f2045;
    s->regs[R_DESCONF6] = R_DESCONF6_DMA_ADDR_64B_MASK;
    s->regs[R_INT_Q1_MASK] = 0x00000CE6;
    s->regs[R_JUMBO_MAX_LEN] = s->jumbo_max_len;

    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[R_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
    s->regs[R_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
    s->regs[R_SPADDR1HI] = a[4] | (a[5] << 8);

    for (i = 0; i < 4; i++) {
        s->sar_active[i] = false;
    }

    gem_phy_reset(s);

    gem_update_int_status(s);
}

static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
    return s->phy_regs[reg_num];
}

static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val);

    switch (reg_num) {
    case PHY_REG_CONTROL:
        if (val & PHY_REG_CONTROL_RST) {
            /* Phy reset */
            gem_phy_reset(s);
            val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP);
            s->phy_loop = 0;
        }
        if (val & PHY_REG_CONTROL_ANEG) {
            /* Complete autonegotiation immediately */
            val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART);
            s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL;
        }
        if (val & PHY_REG_CONTROL_LOOP) {
            DB_PRINT("PHY placed in loopback\n");
            s->phy_loop = 1;
        } else {
            s->phy_loop = 0;
        }
        break;
    }
    s->phy_regs[reg_num] = val;
}

static void gem_handle_phy_access(CadenceGEMState *s)
{
    uint32_t val = s->regs[R_PHYMNTNC];
    uint32_t phy_addr, reg_num;

    phy_addr = FIELD_EX32(val, PHYMNTNC, PHY_ADDR);

    if (phy_addr != s->phy_addr) {
        /* no phy at this address */
        if (FIELD_EX32(val, PHYMNTNC, OP) == MDIO_OP_READ) {
            s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA, 0xffff);
        }
        return;
    }

    reg_num = FIELD_EX32(val, PHYMNTNC, REG_ADDR);

    switch (FIELD_EX32(val, PHYMNTNC, OP)) {
    case MDIO_OP_READ:
        s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA,
                                         gem_phy_read(s, reg_num));
        break;

    case MDIO_OP_WRITE:
        gem_phy_write(s, reg_num, val);
        break;

    default:
        break; /* only clause 22 operations are supported */
    }
}
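
/*
 * The PHYMNTNC fields mirror an IEEE 802.3 clause 22 MDIO frame: ST (start
 * of frame), OP (0b10 read / 0b01 write, per MDIO_OP_*), the PHY and
 * register addresses, and 16 bits of data. A guest reads a PHY register by
 * writing PHY_ADDR/REG_ADDR/OP to this register and then reading DATA back
 * from it, as gem_handle_phy_access() above implements.
 */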

/*
 * gem_read:
 * Read a GEM register.
 */
static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
    CadenceGEMState *s;
    uint32_t retval;
    s = opaque;

    offset >>= 2;
    retval = s->regs[offset];

    DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset * 4, retval);

    switch (offset) {
    case R_ISR:
        DB_PRINT("lowering irqs on ISR read\n");
        /* The interrupts get updated at the end of the function. */
        break;
    }

    /* Squash read to clear bits */
    s->regs[offset] &= ~(s->regs_rtc[offset]);

    /* Do not provide write only bits */
    retval &= ~(s->regs_wo[offset]);

    DB_PRINT("0x%08x\n", retval);
    gem_update_int_status(s);
    return retval;
}
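
/*
 * Note that a read of ISR both returns and clears the pending bits
 * (regs_rtc[R_ISR] covers the whole register) and re-evaluates the IRQ
 * lines on the way out, so a guest interrupt handler acks queue 0
 * interrupts simply by reading ISR.
 */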

/*
 * gem_write:
 * Write a GEM register.
 */
static void gem_write(void *opaque, hwaddr offset, uint64_t val,
                      unsigned size)
{
    CadenceGEMState *s = (CadenceGEMState *)opaque;
    uint32_t readonly;
    int i;

    DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
    offset >>= 2;

    /* Squash bits which are read only in write value */
    val &= ~(s->regs_ro[offset]);
    /* Preserve (only) bits which are read only and wtc in register */
    readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]);

    /* Copy register write to backing store */
    s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly;

    /* do w1c */
    s->regs[offset] &= ~(s->regs_w1c[offset] & val);

    /* Handle register write side effects */
    switch (offset) {
    case R_NWCTRL:
        if (FIELD_EX32(val, NWCTRL, ENABLE_RECEIVE)) {
            for (i = 0; i < s->num_priority_queues; ++i) {
                gem_get_rx_desc(s, i);
            }
        }
        if (FIELD_EX32(val, NWCTRL, TRANSMIT_START)) {
            gem_transmit(s);
        }
        if (!(FIELD_EX32(val, NWCTRL, ENABLE_TRANSMIT))) {
            /* Reset to start of Q when transmit disabled. */
            for (i = 0; i < s->num_priority_queues; i++) {
                s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i);
            }
        }
        if (gem_can_receive(qemu_get_queue(s->nic))) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TXSTATUS:
        gem_update_int_status(s);
        break;
    case R_RXQBASE:
        s->rx_desc_addr[0] = val;
        break;
    case R_RECEIVE_Q1_PTR ... R_RECEIVE_Q7_PTR:
        s->rx_desc_addr[offset - R_RECEIVE_Q1_PTR + 1] = val;
        break;
    case R_TXQBASE:
        s->tx_desc_addr[0] = val;
        break;
    case R_TRANSMIT_Q1_PTR ... R_TRANSMIT_Q7_PTR:
        s->tx_desc_addr[offset - R_TRANSMIT_Q1_PTR + 1] = val;
        break;
    case R_RXSTATUS:
        gem_update_int_status(s);
        break;
    case R_IER:
        s->regs[R_IMR] &= ~val;
        gem_update_int_status(s);
        break;
    case R_JUMBO_MAX_LEN:
        s->regs[R_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK;
        break;
    case R_INT_Q1_ENABLE ... R_INT_Q7_ENABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_ENABLE] &= ~val;
        gem_update_int_status(s);
        break;
    case R_IDR:
        s->regs[R_IMR] |= val;
        gem_update_int_status(s);
        break;
    case R_INT_Q1_DISABLE ... R_INT_Q7_DISABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_DISABLE] |= val;
        gem_update_int_status(s);
        break;
    case R_SPADDR1LO:
    case R_SPADDR2LO:
    case R_SPADDR3LO:
    case R_SPADDR4LO:
        s->sar_active[(offset - R_SPADDR1LO) / 2] = false;
        break;
    case R_SPADDR1HI:
    case R_SPADDR2HI:
    case R_SPADDR3HI:
    case R_SPADDR4HI:
        s->sar_active[(offset - R_SPADDR1HI) / 2] = true;
        break;
    case R_PHYMNTNC:
        gem_handle_phy_access(s);
        break;
    }

    DB_PRINT("newval: 0x%08x\n", s->regs[offset]);
}

static const MemoryRegionOps gem_ops = {
    .read = gem_read,
    .write = gem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void gem_set_link(NetClientState *nc)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);

    DB_PRINT("\n");
    phy_update_link(s);
    gem_update_int_status(s);
}

static NetClientInfo net_gem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = gem_can_receive,
    .receive = gem_receive,
    .link_status_changed = gem_set_link,
};

static void gem_realize(DeviceState *dev, Error **errp)
{
    CadenceGEMState *s = CADENCE_GEM(dev);
    int i;

    address_space_init(&s->dma_as,
                       s->dma_mr ? s->dma_mr : get_system_memory(), "dma");

    if (s->num_priority_queues == 0 ||
        s->num_priority_queues > MAX_PRIORITY_QUEUES) {
        error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
                   s->num_priority_queues);
        return;
    } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
        error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
                   s->num_type1_screeners);
        return;
    } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
        error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
                   s->num_type2_screeners);
        return;
    }

    for (i = 0; i < s->num_priority_queues; ++i) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&net_gem_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id,
                          &dev->mem_reentrancy_guard, s);

    if (s->jumbo_max_len > MAX_FRAME_SIZE) {
        error_setg(errp, "jumbo-max-len is greater than %d",
                   MAX_FRAME_SIZE);
        return;
    }
}

static void gem_init(Object *obj)
{
    CadenceGEMState *s = CADENCE_GEM(obj);
    DeviceState *dev = DEVICE(obj);

    DB_PRINT("\n");

    gem_init_register_masks(s);
    memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
                          "enet", sizeof(s->regs));

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}

static const VMStateDescription vmstate_cadence_gem = {
    .name = "cadence_gem",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
        VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
        VMSTATE_UINT8(phy_loop, CadenceGEMState),
        VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
        VMSTATE_END_OF_LIST(),
    }
};

static Property gem_properties[] = {
    DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
    DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
                       GEM_MODID_VALUE),
    DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr,
                      BOARD_PHY_ADDRESS),
    DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
                      num_priority_queues, 1),
    DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
                      num_type1_screeners, 4),
    DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
                      num_type2_screeners, 4),
    DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
                       jumbo_max_len, 10240),
    DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void gem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = gem_realize;
    device_class_set_props(dc, gem_properties);
    dc->vmsd = &vmstate_cadence_gem;
    device_class_set_legacy_reset(dc, gem_reset);
}

static const TypeInfo gem_info = {
    .name = TYPE_CADENCE_GEM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CadenceGEMState),
    .instance_init = gem_init,
    .class_init = gem_class_init,
};

static void gem_register_types(void)
{
    type_register_static(&gem_info);
}

type_init(gem_register_types)
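
/*
 * Illustrative board wiring (a sketch under stated assumptions, not code
 * from this file; base_addr and irq are board-specific). A machine model
 * would typically instantiate the device along these lines:
 *
 *     DeviceState *dev = qdev_new(TYPE_CADENCE_GEM);
 *     qdev_prop_set_uint8(dev, "num-priority-queues", 2);
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_addr);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
 */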