/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)	\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)	\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)	\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)	\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS	1
#define ETH_HLEN	ETHER_HDR_SIZE	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP		(2 + ETH_HLEN + 4 + 32)
#define MTU		1500
#define RX_BUFFER_SIZE	(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

#define MVPP2_SMI_TIMEOUT	10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)
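
/*
 * Illustration (not part of the register map itself): assigning a BM
 * pool to an RXQ is a read-modify-write of MVPP2_RXQ_CONFIG_REG using
 * the offset/mask pairs above, e.g. for the long pool on PPv2.2:
 *
 *	val = mvpp2_read(priv, MVPP2_RXQ_CONFIG_REG(rxq));
 *	val &= ~MVPP22_RXQ_POOL_LONG_MASK;
 *	val |= (pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP22_RXQ_POOL_LONG_MASK;
 *	mvpp2_write(priv, MVPP2_RXQ_CONFIG_REG(rxq), val);
 */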

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
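
/*
 * Usage sketch: after servicing an RX queue, the number of processed
 * descriptors and the number of refilled buffers are reported to the HW
 * in a single write, packed according to the offsets above:
 *
 *	val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
 *	mvpp2_write(priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq), val);
 */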

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
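
/*
 * Sketch of how the cache/domain codes above combine during PPv2.2 AXI
 * initialization (illustrative, composed from the fields in this block):
 *
 *	val = MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS;
 *	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS;
 *	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
 */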

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff
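
/*
 * Release-order sketch (mirrors what the buffer-put helper does): the
 * virtual address / cookie is latched first, and the write of the
 * physical address then commits the release to the pool; on PPv2.2 the
 * high address bits go through MVPP22_BM_ADDR_HIGH_RLS_REG beforehand:
 *
 *	mvpp2_write(priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
 *	mvpp2_write(priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
 */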

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)
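
/*
 * Example (illustrative): the GMAC max-RX-size field in
 * MVPP2_GMAC_CTRL_0_REG is expressed in units of 2 bytes, so
 * programming it is a read-modify-write like:
 *
 *	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
 *	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
 *	val |= (((pkt_size - MVPP2_MH_SIZE) / 2) <<
 *		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
 *	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
 */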

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + p)
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
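
/*
 * Sketch (an assumption, not a verbatim copy of the init code): on
 * PPv2.2 each GOP port gets its PHY address programmed once so that HW
 * SMI polling knows which PHY to query, along the lines of:
 *
 *	writel(port->phyaddr, priv->iface_base +
 *	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));
 *
 * MVPP22_SMI itself is an offset relative to iface_base; see the SMI
 * block below.
 */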

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index)		\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200

/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS		0	/* Data */
#define MVPP2_SMI_DATA_MASK		(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr */
#define MVPP2_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ		(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY			(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK		0x1f
#define MVPP2_PHY_REG_MASK		0x1f
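
/*
 * An SMI (MDIO) read is composed from the fields above (illustrative
 * sketch): poll until MVPP2_SMI_BUSY clears, issue the command, then
 * wait for MVPP2_SMI_READ_VALID before taking the data:
 *
 *	smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) |
 *		  (reg << MVPP2_SMI_REG_ADDR_OFFS) |
 *		  MVPP2_SMI_OPCODE_READ;
 *	writel(smi_reg, smi_base);
 *	... wait for MVPP2_SMI_READ_VALID ...
 *	data = readl(smi_base) & MVPP2_SMI_DATA_MASK;
 */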

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* TX FIFO minimum fill thresholds, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
				(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
				(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
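
/*
 * Worked example of the byte mapping above: each 32-bit TCAM data word
 * packs two header-data bytes in bits [15:0] and their two enable bytes
 * in bits [31:16], hence for header offset 2:
 *
 *	MVPP2_PRS_TCAM_DATA_BYTE(2)	= 4	(data in tcam.byte[4])
 *	MVPP2_PRS_TCAM_DATA_BYTE_EN(2)	= 6	(enable in tcam.byte[6])
 */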

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000
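
/*
 * Usage note: result-info bits are always written together with a mask
 * selecting which bits are valid, e.g. flagging a frame as L2 multicast
 * (this exact call appears in the MAC multicast rule further down):
 *
 *	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
 *				 MVPP2_PRS_RI_L2_CAST_MASK);
 */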

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED	true
#define MVPP2_PRS_UNTAGGED	false
#define MVPP2_PRS_EDSA		true
#define MVPP2_PRS_DSA		false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16 * 1024 - MVPP2_BM_POOL_PTR_ALIGN / 4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
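
/*
 * Arithmetic check: with NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0
 * in this U-Boot port, MVPP2_BM_SHORT_PKT_SIZE evaluates to
 * 512 - 32 - 0 = 480 bytes of packet data per 512-byte buffer.
 */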

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
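
/*
 * Traversal sketch: the queue helpers further down walk these rings
 * with MVPP2_QUEUE_NEXT_DESC(), e.g. fetching the next RX descriptor:
 *
 *	struct mvpp2_rx_desc *rx_desc = rxq->descs + rxq->next_desc_to_proc;
 *
 *	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq,
 *						       rxq->next_desc_to_proc);
 */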

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver needs only a fraction of that for its buffer descriptors,
 * so a single 1MB region of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
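
/*
 * Rough layout (an assumption, for orientation only): probe carves all
 * of buffer_loc out of a single BD_SPACE-sized uncached region, roughly:
 *
 *	| aggr TX descs | TX descs | RX descs | BM pool | RX buffers |
 */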

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
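
/*
 * Usage sketch (the 0x0800 value is the standard IPv4 ethertype, given
 * here only as an illustration): matching IPv4 at the start of the
 * header data takes two TCAM bytes with all enable bits set:
 *
 *	mvpp2_prs_match_etype(&pe, 0, 0x0800);
 */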

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	       (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
	     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
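
/*
 * Usage sketch (illustrative, assuming a caller toggling promiscuous
 * mode for a port): the drop-all entry and the promiscuous entry below
 * are meant to be flipped together, so unknown-DA packets are either
 * dropped or accepted, never both:
 *
 *	mvpp2_prs_mac_drop_all_set(priv, port_id, false);
 *	mvpp2_prs_mac_promisc_set(priv, port_id, true);
 */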

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
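
/*
 * Example for the single-byte multicast match above (illustrative):
 * matching only byte 0 of the DA with enable mask 0xff is enough to
 * classify the standard multicast prefixes, e.g. 01:00:5e:xx:xx:xx
 * (IPv4) and 33:33:xx:xx:xx:xx (IPv6):
 *
 *	mvpp2_prs_tcam_data_byte_set(&pe, 0, 0x01, 0xff);  // IPv4 mcast
 *	mvpp2_prs_tcam_data_byte_set(&pe, 0, 0x33, 0xff);  // IPv6 mcast
 */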

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports. Note the argument order: the
	 * original called these with the entry index in the port slot.
	 */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
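
/*
 * Resulting default lookup chain (an illustrative summary of the init
 * calls above): every port starts at the Marvell Header entry, which
 * skips MVPP2_MH_SIZE bytes and hands over to the MAC stage; MAC
 * entries in turn shift past the 12 DA/SA bytes towards the ethertype:
 *
 *	MVPP2_PRS_LU_MH -> MVPP2_PRS_LU_MAC -> MVPP2_PRS_LU_DSA -> ...
 *
 * mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 * MVPP2_PRS_PORT_LU_MAX, 0) is what anchors each port at that first
 * stage.
 */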

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);
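
	/*
	 * Note (illustrative): the "IPv4 with options" entry above
	 * deliberately skips the memset and reuses the sw entry left
	 * over from the "IPv4 without options" case, so the ethertype
	 * match and L3 offset are inherited; only the IHL byte match
	 * and the result-info words are rewritten before the entry is
	 * stored at its new TCAM index.
	 */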

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even though the L3 protocol is unknown */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
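
/*
 * Usage sketch for mvpp2_prs_mac_da_accept() below (illustrative): a
 * caller installing a unicast filter for a port would do
 *
 *	err = mvpp2_prs_mac_da_accept(priv, port_id, addr, true);
 *
 * and later drop the same filter by passing add == false; the entry is
 * invalidated automatically once its port map becomes empty.
 */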

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry yet - create it */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table; clear the whole data array, not
	 * just MVPP2_CLS_FLOWS_TBL_DATA_WORDS bytes of it
	 */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
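
/*
 * Example for the lookup-table addressing used above (illustrative):
 * the index register combines the way bit and the lookup ID, so with
 * MVPP2_CLS_LKP_INDEX_WAY_OFFS == 6 an entry for lkpid 3 on way 1 is
 * addressed as
 *
 *	val = (1 << 6) | 3;	// 0x43
 *	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
 */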

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Allocate buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}
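
/*
 * Worked example for mvpp2_bm_pool_bufsize_set() above (illustrative):
 * with MVPP2_POOL_BUF_SIZE_OFFSET == 5 the buffer size register works
 * in 32-byte granules, so a 1518-byte buffer is rounded up before it
 * is programmed:
 *
 *	val = ALIGN(1518, 1 << 5);	// 1536
 */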

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}

static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask all BM interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
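
/*
 * Example for the BM cookie helpers above (illustrative): the pool id
 * lives in one byte of the cookie, so set and get round-trip cleanly
 * whatever the other cookie bits contain:
 *
 *	u32 cookie = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(cookie);	// pool == 3
 */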

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor.
	 * Instead of storing the virtual address, we store the
	 * physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
				   new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
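
/*
 * Worked example for mvpp2_gmac_max_rx_size_set() below (illustrative,
 * assuming the 2-byte Marvell header, MVPP2_MH_SIZE == 2): the GMAC
 * max-RX-size field counts 2-byte units after the Marvell header, so
 * with pkt_size == 1518 the value programmed is (1518 - 2) / 2 == 758.
 */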

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure the port to work with the Gig PCS or without it.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support - 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}
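
/*
 * Note on the jumbo-frame value above (illustrative): like the per-port
 * max-RX-size field, the GMAC register counts 2-byte units, so writing
 * 0x1400 allows frames of up to 0x1400 * 2 = 0x2800 (10240) bytes.
 */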

static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Set the internal muxes to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	switch (num_of_lanes) {
	case 1:
		lane = 0;
		break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}
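
/*
 * The per-mode branches above all follow the same reset choreography
 * (an illustrative summary): assert reset, program the PCS and MAC
 * while they are held in reset, then release the PCS before the MAC,
 * e.g. for RGMII:
 *
 *	gop_gmac_reset(port, 1);	// assert
 *	gop_gmac_mode_cfg(port);	// configure while in reset
 *	gop_gpcs_reset(port, 0);	// PCS out of reset
 *	gop_gmac_reset(port, 0);	// MAC out of reset
 */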

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}
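
/*
 * All of the RFU1 helpers above share one read-modify-write idiom
 * (illustrative): clear the field, shift the new value into place,
 * mask it to the field width, then OR it back in. For instance,
 * activating GoP port 0 is just
 *
 *	gop_netc_active_port(priv, 0, 1);
 *
 * which touches only NETC_PORTS_ACTIVE_MASK(0) inside
 * NETCOMP_PORTS_CONTROL_1_REG and leaves the other bits untouched.
 */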
3598 3599 static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val) 3600 { 3601 u32 reg; 3602 3603 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3604 reg &= ~NETC_BUS_WIDTH_SELECT_MASK; 3605 3606 val <<= NETC_BUS_WIDTH_SELECT_OFFS; 3607 val &= NETC_BUS_WIDTH_SELECT_MASK; 3608 3609 reg |= val; 3610 3611 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3612 } 3613 3614 static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val) 3615 { 3616 u32 reg; 3617 3618 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3619 reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; 3620 3621 val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS; 3622 val &= NETC_GIG_RX_DATA_SAMPLE_MASK; 3623 3624 reg |= val; 3625 3626 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3627 } 3628 3629 static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id, 3630 enum mv_netc_phase phase) 3631 { 3632 switch (phase) { 3633 case MV_NETC_FIRST_PHASE: 3634 /* Set Bus Width to HB mode = 1 */ 3635 gop_netc_bus_width_select(priv, 1); 3636 /* Select RGMII mode */ 3637 gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII); 3638 break; 3639 3640 case MV_NETC_SECOND_PHASE: 3641 /* De-assert the relevant port HB reset */ 3642 gop_netc_port_rf_reset(priv, gop_id, 1); 3643 break; 3644 } 3645 } 3646 3647 static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id, 3648 enum mv_netc_phase phase) 3649 { 3650 switch (phase) { 3651 case MV_NETC_FIRST_PHASE: 3652 /* Set Bus Width to HB mode = 1 */ 3653 gop_netc_bus_width_select(priv, 1); 3654 /* Select SGMII mode */ 3655 if (gop_id >= 1) { 3656 gop_netc_gbe_sgmii_mode_select(priv, gop_id, 3657 MV_NETC_GBE_SGMII); 3658 } 3659 3660 /* Configure the sample stages */ 3661 gop_netc_sample_stages_timing(priv, 0); 3662 /* Configure the ComPhy Selector */ 3663 /* gop_netc_com_phy_selector_config(netComplex); */ 3664 break; 3665 3666 case MV_NETC_SECOND_PHASE: 3667 /* De-assert the relevant port HB reset */ 3668 gop_netc_port_rf_reset(priv, gop_id, 1); 3669 break; 3670 } 3671 } 3672 3673 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) 3674 { 3675 u32 c = priv->netc_config; 3676 3677 if (c & MV_NETC_GE_MAC2_SGMII) 3678 gop_netc_mac_to_sgmii(priv, 2, phase); 3679 else 3680 gop_netc_mac_to_xgmii(priv, 2, phase); 3681 3682 if (c & MV_NETC_GE_MAC3_SGMII) { 3683 gop_netc_mac_to_sgmii(priv, 3, phase); 3684 } else { 3685 gop_netc_mac_to_xgmii(priv, 3, phase); 3686 if (c & MV_NETC_GE_MAC3_RGMII) 3687 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); 3688 else 3689 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); 3690 } 3691 3692 /* Activate gop ports 0, 2, 3 */ 3693 gop_netc_active_port(priv, 0, 1); 3694 gop_netc_active_port(priv, 2, 1); 3695 gop_netc_active_port(priv, 3, 1); 3696 3697 if (phase == MV_NETC_SECOND_PHASE) { 3698 /* Enable the GOP internal clock logic */ 3699 gop_netc_gop_clock_logic_set(priv, 1); 3700 /* De-assert GOP unit reset */ 3701 gop_netc_gop_reset(priv, 1); 3702 } 3703 3704 return 0; 3705 } 3706 3707 /* Set defaults to the MVPP2 port */ 3708 static void mvpp2_defaults_set(struct mvpp2_port *port) 3709 { 3710 int tx_port_num, val, queue, ptxq, lrxq; 3711 3712 if (port->priv->hw_version == MVPP21) { 3713 /* Configure port to loopback if needed */ 3714 if (port->flags & MVPP2_F_LOOPBACK) 3715 mvpp2_port_loopback_set(port); 3716 3717 /* Update TX FIFO MIN Threshold */ 3718 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3719 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3720 /* Min. 
TX threshold must be less than minimal packet length */ 3721 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 3722 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3723 } 3724 3725 /* Disable Legacy WRR, Disable EJP, Release from reset */ 3726 tx_port_num = mvpp2_egress_port(port); 3727 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 3728 tx_port_num); 3729 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 3730 3731 /* Close bandwidth for all queues */ 3732 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { 3733 ptxq = mvpp2_txq_phys(port->id, queue); 3734 mvpp2_write(port->priv, 3735 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); 3736 } 3737 3738 /* Set refill period to 1 usec, refill tokens 3739 * and bucket size to maximum 3740 */ 3741 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8); 3742 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 3743 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 3744 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 3745 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 3746 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 3747 val = MVPP2_TXP_TOKEN_SIZE_MAX; 3748 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 3749 3750 /* Set MaximumLowLatencyPacketSize value to 256 */ 3751 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 3752 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 3753 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 3754 3755 /* Enable Rx cache snoop */ 3756 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3757 queue = port->rxqs[lrxq]->id; 3758 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3759 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 3760 MVPP2_SNOOP_BUF_HDR_MASK; 3761 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3762 } 3763 } 3764 3765 /* Enable/disable receiving packets */ 3766 static void mvpp2_ingress_enable(struct mvpp2_port *port) 3767 { 3768 u32 val; 3769 int lrxq, queue; 3770 3771 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3772 queue = port->rxqs[lrxq]->id; 3773 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3774 val &= ~MVPP2_RXQ_DISABLE_MASK; 3775 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3776 } 3777 } 3778 3779 static void mvpp2_ingress_disable(struct mvpp2_port *port) 3780 { 3781 u32 val; 3782 int lrxq, queue; 3783 3784 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3785 queue = port->rxqs[lrxq]->id; 3786 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3787 val |= MVPP2_RXQ_DISABLE_MASK; 3788 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3789 } 3790 } 3791 3792 /* Enable transmit via physical egress queue 3793 * - HW starts taking descriptors from DRAM 3794 */ 3795 static void mvpp2_egress_enable(struct mvpp2_port *port) 3796 { 3797 u32 qmap; 3798 int queue; 3799 int tx_port_num = mvpp2_egress_port(port); 3800 3801 /* Enable all initialized TXs.
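 * Each initialized TXQ contributes one bit to the enable map written
 * to MVPP2_TXP_SCHED_Q_CMD_REG below; e.g. with txq_number == 1 and
 * only txqs[0] allocated, qmap ends up as 0x1.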
*/ 3802 qmap = 0; 3803 for (queue = 0; queue < txq_number; queue++) { 3804 struct mvpp2_tx_queue *txq = port->txqs[queue]; 3805 3806 if (txq->descs != NULL) 3807 qmap |= (1 << queue); 3808 } 3809 3810 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3811 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 3812 } 3813 3814 /* Disable transmit via physical egress queue 3815 * - HW doesn't take descriptors from DRAM 3816 */ 3817 static void mvpp2_egress_disable(struct mvpp2_port *port) 3818 { 3819 u32 reg_data; 3820 int delay; 3821 int tx_port_num = mvpp2_egress_port(port); 3822 3823 /* Issue stop command for active channels only */ 3824 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3825 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 3826 MVPP2_TXP_SCHED_ENQ_MASK; 3827 if (reg_data != 0) 3828 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 3829 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 3830 3831 /* Wait for all Tx activity to terminate. */ 3832 delay = 0; 3833 do { 3834 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 3835 netdev_warn(port->dev, 3836 "Tx stop timed out, status=0x%08x\n", 3837 reg_data); 3838 break; 3839 } 3840 mdelay(1); 3841 delay++; 3842 3843 /* Check port TX Command register that all 3844 * Tx queues are stopped 3845 */ 3846 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 3847 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 3848 } 3849 3850 /* Rx descriptors helper methods */ 3851 3852 /* Get number of Rx descriptors occupied by received packets */ 3853 static inline int 3854 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 3855 { 3856 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 3857 3858 return val & MVPP2_RXQ_OCCUPIED_MASK; 3859 } 3860 3861 /* Update Rx queue status with the number of occupied and available 3862 * Rx descriptor slots. 3863 */ 3864 static inline void 3865 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 3866 int used_count, int free_count) 3867 { 3868 /* Decrement the number of used descriptors and increment 3869 * the number of free descriptors.
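 *
 * Both counts go out in a single register write: used_count occupies
 * the low 16 bits (MVPP2_RXQ_NUM_PROCESSED_OFFSET) and free_count the
 * high 16 bits (MVPP2_RXQ_NUM_NEW_OFFSET), so e.g. the (1, 1) update
 * issued after each received packet produces the value 0x00010001.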
3870 */ 3871 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 3872 3873 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 3874 } 3875 3876 /* Get pointer to next RX descriptor to be processed by SW */ 3877 static inline struct mvpp2_rx_desc * 3878 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 3879 { 3880 int rx_desc = rxq->next_desc_to_proc; 3881 3882 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 3883 prefetch(rxq->descs + rxq->next_desc_to_proc); 3884 return rxq->descs + rx_desc; 3885 } 3886 3887 /* Set rx queue offset */ 3888 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 3889 int prxq, int offset) 3890 { 3891 u32 val; 3892 3893 /* Convert offset from bytes to units of 32 bytes */ 3894 offset = offset >> 5; 3895 3896 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 3897 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 3898 3899 /* Offset is in units of 32 bytes */ 3900 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 3901 MVPP2_RXQ_PACKET_OFFSET_MASK); 3902 3903 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 3904 } 3905 3906 /* Obtain BM cookie information from descriptor */ 3907 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, 3908 struct mvpp2_rx_desc *rx_desc) 3909 { 3910 int cpu = smp_processor_id(); 3911 int pool; 3912 3913 pool = (mvpp2_rxdesc_status_get(port, rx_desc) & 3914 MVPP2_RXD_BM_POOL_ID_MASK) >> 3915 MVPP2_RXD_BM_POOL_ID_OFFS; 3916 3917 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 3918 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 3919 } 3920 3921 /* Tx descriptors helper methods */ 3922 3923 /* Get number of Tx descriptors waiting to be transmitted by HW */ 3924 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, 3925 struct mvpp2_tx_queue *txq) 3926 { 3927 u32 val; 3928 3929 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3930 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 3931 3932 return val & MVPP2_TXQ_PENDING_MASK; 3933 } 3934 3935 /* Get pointer to next Tx descriptor to be processed (sent) by HW */ 3936 static struct mvpp2_tx_desc * 3937 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 3938 { 3939 int tx_desc = txq->next_desc_to_proc; 3940 3941 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 3942 return txq->descs + tx_desc; 3943 } 3944 3945 /* Update HW with number of aggregated Tx descriptors to be sent */ 3946 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 3947 { 3948 /* aggregated access - relevant TXQ number is written in TX desc */ 3949 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); 3950 } 3951 3952 /* Get number of sent descriptors and decrement counter. 3953 * The number of sent descriptors is returned.
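 * E.g. a raw MVPP2_TXQ_SENT_REG value of 0x00030000 decodes to three
 * sent descriptors, since the count sits in bits [29:16].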
3954 * Per-CPU access 3955 */ 3956 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 3957 struct mvpp2_tx_queue *txq) 3958 { 3959 u32 val; 3960 3961 /* Reading status reg resets transmitted descriptor counter */ 3962 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); 3963 3964 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 3965 MVPP2_TRANSMITTED_COUNT_OFFSET; 3966 } 3967 3968 static void mvpp2_txq_sent_counter_clear(void *arg) 3969 { 3970 struct mvpp2_port *port = arg; 3971 int queue; 3972 3973 for (queue = 0; queue < txq_number; queue++) { 3974 int id = port->txqs[queue]->id; 3975 3976 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); 3977 } 3978 } 3979 3980 /* Set max sizes for Tx queues */ 3981 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 3982 { 3983 u32 val, size, mtu; 3984 int txq, tx_port_num; 3985 3986 mtu = port->pkt_size * 8; 3987 if (mtu > MVPP2_TXP_MTU_MAX) 3988 mtu = MVPP2_TXP_MTU_MAX; 3989 3990 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 3991 mtu = 3 * mtu; 3992 3993 /* Indirect access to registers */ 3994 tx_port_num = mvpp2_egress_port(port); 3995 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3996 3997 /* Set MTU */ 3998 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 3999 val &= ~MVPP2_TXP_MTU_MAX; 4000 val |= mtu; 4001 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 4002 4003 /* TXP token size and all TXQs token size must be larger than MTU */ 4004 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 4005 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 4006 if (size < mtu) { 4007 size = mtu; 4008 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 4009 val |= size; 4010 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 4011 } 4012 4013 for (txq = 0; txq < txq_number; txq++) { 4014 val = mvpp2_read(port->priv, 4015 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 4016 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 4017 4018 if (size < mtu) { 4019 size = mtu; 4020 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 4021 val |= size; 4022 mvpp2_write(port->priv, 4023 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 4024 val); 4025 } 4026 } 4027 } 4028 4029 /* Free Tx queue skbuffs */ 4030 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4031 struct mvpp2_tx_queue *txq, 4032 struct mvpp2_txq_pcpu *txq_pcpu, int num) 4033 { 4034 int i; 4035 4036 for (i = 0; i < num; i++) 4037 mvpp2_txq_inc_get(txq_pcpu); 4038 } 4039 4040 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 4041 u32 cause) 4042 { 4043 int queue = fls(cause) - 1; 4044 4045 return port->rxqs[queue]; 4046 } 4047 4048 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4049 u32 cause) 4050 { 4051 int queue = fls(cause) - 1; 4052 4053 return port->txqs[queue]; 4054 } 4055 4056 /* Rx/Tx queue initialization/cleanup methods */ 4057 4058 /* Allocate and initialize descriptors for aggr TXQ */ 4059 static int mvpp2_aggr_txq_init(struct udevice *dev, 4060 struct mvpp2_tx_queue *aggr_txq, 4061 int desc_num, int cpu, 4062 struct mvpp2 *priv) 4063 { 4064 u32 txq_dma; 4065 4066 /* Allocate memory for TX descriptors */ 4067 aggr_txq->descs = buffer_loc.aggr_tx_descs; 4068 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs; 4069 if (!aggr_txq->descs) 4070 return -ENOMEM; 4071 4072 /* Make sure descriptor address is cache line size aligned */ 4073 BUG_ON(aggr_txq->descs != 4074 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4075 4076 aggr_txq->last_desc = aggr_txq->size - 1; 4077 4078 /* Aggr
TXQ no reset WA */ 4079 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 4080 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 4081 4082 /* Set Tx descriptors queue starting address indirect 4083 * access 4084 */ 4085 if (priv->hw_version == MVPP21) 4086 txq_dma = aggr_txq->descs_dma; 4087 else 4088 txq_dma = aggr_txq->descs_dma >> 4089 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 4090 4091 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 4092 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 4093 4094 return 0; 4095 } 4096 4097 /* Create a specified Rx queue */ 4098 static int mvpp2_rxq_init(struct mvpp2_port *port, 4099 struct mvpp2_rx_queue *rxq) 4100 4101 { 4102 u32 rxq_dma; 4103 4104 rxq->size = port->rx_ring_size; 4105 4106 /* Allocate memory for RX descriptors */ 4107 rxq->descs = buffer_loc.rx_descs; 4108 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; 4109 if (!rxq->descs) 4110 return -ENOMEM; 4111 4112 BUG_ON(rxq->descs != 4113 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4114 4115 rxq->last_desc = rxq->size - 1; 4116 4117 /* Zero occupied and non-occupied counters - direct access */ 4118 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4119 4120 /* Set Rx descriptors queue starting address - indirect access */ 4121 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4122 if (port->priv->hw_version == MVPP21) 4123 rxq_dma = rxq->descs_dma; 4124 else 4125 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 4126 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4127 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4128 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 4129 4130 /* Set Offset */ 4131 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4132 4133 /* Add number of descriptors ready for receiving packets */ 4134 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 4135 4136 return 0; 4137 } 4138 4139 /* Push packets received by the RXQ to BM pool */ 4140 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 4141 struct mvpp2_rx_queue *rxq) 4142 { 4143 int rx_received, i; 4144 4145 rx_received = mvpp2_rxq_received(port, rxq->id); 4146 if (!rx_received) 4147 return; 4148 4149 for (i = 0; i < rx_received; i++) { 4150 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4151 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4152 4153 mvpp2_pool_refill(port, bm, 4154 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4155 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4156 } 4157 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4158 } 4159 4160 /* Cleanup Rx queue */ 4161 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4162 struct mvpp2_rx_queue *rxq) 4163 { 4164 mvpp2_rxq_drop_pkts(port, rxq); 4165 4166 rxq->descs = NULL; 4167 rxq->last_desc = 0; 4168 rxq->next_desc_to_proc = 0; 4169 rxq->descs_dma = 0; 4170 4171 /* Clear Rx descriptors queue starting address and size; 4172 * free descriptor number 4173 */ 4174 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4175 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4176 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 4177 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 4178 } 4179 4180 /* Create and initialize a Tx queue */ 4181 static int mvpp2_txq_init(struct mvpp2_port *port, 4182 struct mvpp2_tx_queue *txq) 4183 { 4184 u32 val; 4185 int cpu, desc, desc_per_txq, tx_port_num; 4186 struct mvpp2_txq_pcpu *txq_pcpu; 4187 4188 txq->size = port->tx_ring_size; 4189 4190 /* Allocate memory for Tx descriptors */ 4191 txq->descs = buffer_loc.tx_descs; 4192 
txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; 4193 if (!txq->descs) 4194 return -ENOMEM; 4195 4196 /* Make sure descriptor address is cache line size aligned */ 4197 BUG_ON(txq->descs != 4198 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4199 4200 txq->last_desc = txq->size - 1; 4201 4202 /* Set Tx descriptors queue starting address - indirect access */ 4203 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4204 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); 4205 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 4206 MVPP2_TXQ_DESC_SIZE_MASK); 4207 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 4208 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 4209 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 4210 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 4211 val &= ~MVPP2_TXQ_PENDING_MASK; 4212 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 4213 4214 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 4215 * for each existing TXQ. 4216 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 4217 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS 4218 */ 4219 desc_per_txq = 16; 4220 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 4221 (txq->log_id * desc_per_txq); 4222 4223 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 4224 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 4225 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 4226 4227 /* WRR / EJP configuration - indirect access */ 4228 tx_port_num = mvpp2_egress_port(port); 4229 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 4230 4231 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 4232 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 4233 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 4234 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 4235 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 4236 4237 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 4238 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 4239 val); 4240 4241 for_each_present_cpu(cpu) { 4242 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4243 txq_pcpu->size = txq->size; 4244 } 4245 4246 return 0; 4247 } 4248 4249 /* Free allocated TXQ resources */ 4250 static void mvpp2_txq_deinit(struct mvpp2_port *port, 4251 struct mvpp2_tx_queue *txq) 4252 { 4253 txq->descs = NULL; 4254 txq->last_desc = 0; 4255 txq->next_desc_to_proc = 0; 4256 txq->descs_dma = 0; 4257 4258 /* Set minimum bandwidth for disabled TXQs */ 4259 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 4260 4261 /* Set Tx descriptors queue starting address and size */ 4262 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4263 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 4264 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 4265 } 4266 4267 /* Cleanup Tx ports */ 4268 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 4269 { 4270 struct mvpp2_txq_pcpu *txq_pcpu; 4271 int delay, pending, cpu; 4272 u32 val; 4273 4274 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4275 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 4276 val |= MVPP2_TXQ_DRAIN_EN_MASK; 4277 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4278 4279 /* The queue has been stopped; wait for all packets 4280 * to be transmitted.
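 * The drain bit set just above forces the HW to keep fetching
 * descriptors, so the pending count polled below can only decrease;
 * the loop gives up after MVPP2_TX_PENDING_TIMEOUT_MSEC iterations
 * of 1 ms each.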
4281 */ 4282 delay = 0; 4283 do { 4284 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4285 netdev_warn(port->dev, 4286 "port %d: cleaning queue %d timed out\n", 4287 port->id, txq->log_id); 4288 break; 4289 } 4290 mdelay(1); 4291 delay++; 4292 4293 pending = mvpp2_txq_pend_desc_num_get(port, txq); 4294 } while (pending); 4295 4296 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4297 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4298 4299 for_each_present_cpu(cpu) { 4300 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4301 4302 /* Release all packets */ 4303 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 4304 4305 /* Reset queue */ 4306 txq_pcpu->count = 0; 4307 txq_pcpu->txq_put_index = 0; 4308 txq_pcpu->txq_get_index = 0; 4309 } 4310 } 4311 4312 /* Cleanup all Tx queues */ 4313 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 4314 { 4315 struct mvpp2_tx_queue *txq; 4316 int queue; 4317 u32 val; 4318 4319 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 4320 4321 /* Reset Tx ports and delete Tx queues */ 4322 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 4323 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4324 4325 for (queue = 0; queue < txq_number; queue++) { 4326 txq = port->txqs[queue]; 4327 mvpp2_txq_clean(port, txq); 4328 mvpp2_txq_deinit(port, txq); 4329 } 4330 4331 mvpp2_txq_sent_counter_clear(port); 4332 4333 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 4334 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4335 } 4336 4337 /* Cleanup all Rx queues */ 4338 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 4339 { 4340 int queue; 4341 4342 for (queue = 0; queue < rxq_number; queue++) 4343 mvpp2_rxq_deinit(port, port->rxqs[queue]); 4344 } 4345 4346 /* Init all Rx queues for port */ 4347 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 4348 { 4349 int queue, err; 4350 4351 for (queue = 0; queue < rxq_number; queue++) { 4352 err = mvpp2_rxq_init(port, port->rxqs[queue]); 4353 if (err) 4354 goto err_cleanup; 4355 } 4356 return 0; 4357 4358 err_cleanup: 4359 mvpp2_cleanup_rxqs(port); 4360 return err; 4361 } 4362 4363 /* Init all tx queues for port */ 4364 static int mvpp2_setup_txqs(struct mvpp2_port *port) 4365 { 4366 struct mvpp2_tx_queue *txq; 4367 int queue, err; 4368 4369 for (queue = 0; queue < txq_number; queue++) { 4370 txq = port->txqs[queue]; 4371 err = mvpp2_txq_init(port, txq); 4372 if (err) 4373 goto err_cleanup; 4374 } 4375 4376 mvpp2_txq_sent_counter_clear(port); 4377 return 0; 4378 4379 err_cleanup: 4380 mvpp2_cleanup_txqs(port); 4381 return err; 4382 } 4383 4384 /* Adjust link */ 4385 static void mvpp2_link_event(struct mvpp2_port *port) 4386 { 4387 struct phy_device *phydev = port->phy_dev; 4388 int status_change = 0; 4389 u32 val; 4390 4391 if (phydev->link) { 4392 if ((port->speed != phydev->speed) || 4393 (port->duplex != phydev->duplex)) { 4394 u32 val; 4395 4396 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4397 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 4398 MVPP2_GMAC_CONFIG_GMII_SPEED | 4399 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 4400 MVPP2_GMAC_AN_SPEED_EN | 4401 MVPP2_GMAC_AN_DUPLEX_EN); 4402 4403 if (phydev->duplex) 4404 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 4405 4406 if (phydev->speed == SPEED_1000) 4407 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 4408 else if (phydev->speed == SPEED_100) 4409 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 4410 4411 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4412 4413 port->duplex = phydev->duplex; 4414 port->speed = phydev->speed; 4415 } 4416 } 4417 4418 if (phydev->link != port->link) { 4419 if 
(!phydev->link) { 4420 port->duplex = -1; 4421 port->speed = 0; 4422 } 4423 4424 port->link = phydev->link; 4425 status_change = 1; 4426 } 4427 4428 if (status_change) { 4429 if (phydev->link) { 4430 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4431 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 4432 MVPP2_GMAC_FORCE_LINK_DOWN); 4433 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4434 mvpp2_egress_enable(port); 4435 mvpp2_ingress_enable(port); 4436 } else { 4437 mvpp2_ingress_disable(port); 4438 mvpp2_egress_disable(port); 4439 } 4440 } 4441 } 4442 4443 /* Main RX/TX processing routines */ 4444 4445 /* Display more error info */ 4446 static void mvpp2_rx_error(struct mvpp2_port *port, 4447 struct mvpp2_rx_desc *rx_desc) 4448 { 4449 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 4450 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 4451 4452 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 4453 case MVPP2_RXD_ERR_CRC: 4454 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 4455 status, sz); 4456 break; 4457 case MVPP2_RXD_ERR_OVERRUN: 4458 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 4459 status, sz); 4460 break; 4461 case MVPP2_RXD_ERR_RESOURCE: 4462 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 4463 status, sz); 4464 break; 4465 } 4466 } 4467 4468 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 4469 static int mvpp2_rx_refill(struct mvpp2_port *port, 4470 struct mvpp2_bm_pool *bm_pool, 4471 u32 bm, dma_addr_t dma_addr) 4472 { 4473 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); 4474 return 0; 4475 } 4476 4477 /* Set hw internals when starting port */ 4478 static void mvpp2_start_dev(struct mvpp2_port *port) 4479 { 4480 switch (port->phy_interface) { 4481 case PHY_INTERFACE_MODE_RGMII: 4482 case PHY_INTERFACE_MODE_RGMII_ID: 4483 case PHY_INTERFACE_MODE_SGMII: 4484 mvpp2_gmac_max_rx_size_set(port); /* fall through */ 4485 default: 4486 break; 4487 } 4488 4489 mvpp2_txp_max_tx_size_set(port); 4490 4491 if (port->priv->hw_version == MVPP21) 4492 mvpp2_port_enable(port); 4493 else 4494 gop_port_enable(port, 1); 4495 } 4496 4497 /* Set hw internals when stopping port */ 4498 static void mvpp2_stop_dev(struct mvpp2_port *port) 4499 { 4500 /* Stop new packets from arriving to RXQs */ 4501 mvpp2_ingress_disable(port); 4502 4503 mvpp2_egress_disable(port); 4504 4505 if (port->priv->hw_version == MVPP21) 4506 mvpp2_port_disable(port); 4507 else 4508 gop_port_enable(port, 0); 4509 } 4510 4511 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) 4512 { 4513 struct phy_device *phy_dev; 4514 4515 if (!port->init || port->link == 0) { 4516 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev, 4517 port->phy_interface); 4518 port->phy_dev = phy_dev; 4519 if (!phy_dev) { 4520 netdev_err(port->dev, "cannot connect to phy\n"); 4521 return -ENODEV; 4522 } 4523 phy_dev->supported &= PHY_GBIT_FEATURES; 4524 phy_dev->advertising = phy_dev->supported; 4525 4527 port->link = 0; 4528 port->duplex = 0; 4529 port->speed = 0; 4530 4531 phy_config(phy_dev); 4532 phy_startup(phy_dev); 4533 if (!phy_dev->link) { 4534 printf("%s: No link\n", phy_dev->dev->name); 4535 return -1; 4536 } 4537 4538 port->init = 1; 4539 } else { 4540 mvpp2_egress_enable(port); 4541 mvpp2_ingress_enable(port); 4542 } 4543 4544 return 0; 4545 } 4546 4547 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) 4548 { 4549 unsigned char mac_bcast[ETH_ALEN] = { 4550 0xff, 0xff, 0xff,
0xff, 0xff, 0xff }; 4551 int err; 4552 4553 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 4554 if (err) { 4555 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 4556 return err; 4557 } 4558 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 4559 port->dev_addr, true); 4560 if (err) { 4561 netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n"); 4562 return err; 4563 } 4564 err = mvpp2_prs_def_flow(port); 4565 if (err) { 4566 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 4567 return err; 4568 } 4569 4570 /* Allocate the Rx/Tx queues */ 4571 err = mvpp2_setup_rxqs(port); 4572 if (err) { 4573 netdev_err(port->dev, "cannot allocate Rx queues\n"); 4574 return err; 4575 } 4576 4577 err = mvpp2_setup_txqs(port); 4578 if (err) { 4579 netdev_err(port->dev, "cannot allocate Tx queues\n"); 4580 return err; 4581 } 4582 4583 if (port->phy_node) { 4584 err = mvpp2_phy_connect(dev, port); 4585 if (err < 0) 4586 return err; 4587 4588 mvpp2_link_event(port); 4589 } else { 4590 mvpp2_egress_enable(port); 4591 mvpp2_ingress_enable(port); 4592 } 4593 4594 mvpp2_start_dev(port); 4595 4596 return 0; 4597 } 4598 4599 /* No Device ops here in U-Boot */ 4600 4601 /* Driver initialization */ 4602 4603 static void mvpp2_port_power_up(struct mvpp2_port *port) 4604 { 4605 struct mvpp2 *priv = port->priv; 4606 4607 /* On PPv2.2 the GoP / interface configuration has already been done */ 4608 if (priv->hw_version == MVPP21) 4609 mvpp2_port_mii_set(port); 4610 mvpp2_port_periodic_xon_disable(port); 4611 if (priv->hw_version == MVPP21) 4612 mvpp2_port_fc_adv_enable(port); 4613 mvpp2_port_reset(port); 4614 } 4615 4616 /* Initialize port HW */ 4617 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) 4618 { 4619 struct mvpp2 *priv = port->priv; 4620 struct mvpp2_txq_pcpu *txq_pcpu; 4621 int queue, cpu, err; 4622 4623 if (port->first_rxq + rxq_number > 4624 MVPP2_MAX_PORTS * priv->max_port_rxqs) 4625 return -EINVAL; 4626 4627 /* Disable port */ 4628 mvpp2_egress_disable(port); 4629 if (priv->hw_version == MVPP21) 4630 mvpp2_port_disable(port); 4631 else 4632 gop_port_enable(port, 0); 4633 4634 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), 4635 GFP_KERNEL); 4636 if (!port->txqs) 4637 return -ENOMEM; 4638 4639 /* Associate physical Tx queues to this port and initialize. 4640 * The mapping is predefined.
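 * mvpp2_txq_phys() maps a (port id, logical queue) pair to a fixed
 * physical TXQ number, so the same logical queue of a given port
 * always lands on the same physical queue, independent of probe
 * order.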
4641 */ 4642 for (queue = 0; queue < txq_number; queue++) { 4643 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 4644 struct mvpp2_tx_queue *txq; 4645 4646 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 4647 if (!txq) 4648 return -ENOMEM; 4649 4650 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu), 4651 GFP_KERNEL); 4652 if (!txq->pcpu) 4653 return -ENOMEM; 4654 4655 txq->id = queue_phy_id; 4656 txq->log_id = queue; 4657 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 4658 for_each_present_cpu(cpu) { 4659 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4660 txq_pcpu->cpu = cpu; 4661 } 4662 4663 port->txqs[queue] = txq; 4664 } 4665 4666 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), 4667 GFP_KERNEL); 4668 if (!port->rxqs) 4669 return -ENOMEM; 4670 4671 /* Allocate and initialize Rx queue for this port */ 4672 for (queue = 0; queue < rxq_number; queue++) { 4673 struct mvpp2_rx_queue *rxq; 4674 4675 /* Map physical Rx queue to port's logical Rx queue */ 4676 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 4677 if (!rxq) 4678 return -ENOMEM; 4679 /* Map this Rx queue to a physical queue */ 4680 rxq->id = port->first_rxq + queue; 4681 rxq->port = port->id; 4682 rxq->logic_rxq = queue; 4683 4684 port->rxqs[queue] = rxq; 4685 } 4686 4687 4688 /* Create Rx descriptor rings */ 4689 for (queue = 0; queue < rxq_number; queue++) { 4690 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4691 4692 rxq->size = port->rx_ring_size; 4693 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 4694 rxq->time_coal = MVPP2_RX_COAL_USEC; 4695 } 4696 4697 mvpp2_ingress_disable(port); 4698 4699 /* Port default configuration */ 4700 mvpp2_defaults_set(port); 4701 4702 /* Port's classifier configuration */ 4703 mvpp2_cls_oversize_rxq_set(port); 4704 mvpp2_cls_port_config(port); 4705 4706 /* Provide an initial Rx packet size */ 4707 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN); 4708 4709 /* Initialize pools for swf */ 4710 err = mvpp2_swf_bm_pool_init(port); 4711 if (err) 4712 return err; 4713 4714 return 0; 4715 } 4716 4717 static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port) 4718 { 4719 int port_node = dev_of_offset(dev); 4720 const char *phy_mode_str; 4721 int phy_node, mdio_off, cp_node; 4722 u32 id; 4723 int phyaddr = 0; 4724 int phy_mode = -1; 4725 phys_addr_t mdio_addr; 4726 4727 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); 4728 4729 if (phy_node > 0) { 4730 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); 4731 if (phyaddr < 0) { 4732 dev_err(&pdev->dev, "could not find phy address\n"); 4733 return -1; 4734 } 4735 mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node); 4736 4737 /* TODO: This is a workaround for an MDIO issue: U-Boot 2017 4738 * has no MDIO driver, and on the MACCHIATObin board the ports 4739 * on CP1 are connected to the MDIO controller on CP0. 4740 * The workaround derives the MDIO base from the base address 4741 * of the PHY handle's parent node. It should be removed once 4742 * a proper MDIO driver is implemented.
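 *
 * For example, given a DT fragment like (hypothetical names):
 *
 *	cp0_mdio: mdio@12a200 {
 *		reg = <0x12a200 0x10>;
 *		phy0: ethernet-phy@0 { reg = <0>; };
 *	};
 *
 * the code below reads the "reg" of the PHY's parent (the mdio node)
 * and ORs in the base address of that node's own parent to obtain
 * the final MDIO base.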
4743 */ 4744 mdio_addr = fdtdec_get_uint(gd->fdt_blob, 4745 mdio_off, "reg", 0); 4746 4747 cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off); 4748 mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob, 4749 cp_node); 4750 4751 port->priv->mdio_base = (void *)mdio_addr; 4752 4753 if (!port->priv->mdio_base) { 4754 dev_err(&pdev->dev, "could not find mdio base address\n"); 4755 return -1; 4756 } 4757 } else { 4758 phy_node = 0; 4759 } 4760 4761 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 4762 if (phy_mode_str) 4763 phy_mode = phy_get_interface_by_name(phy_mode_str); 4764 if (phy_mode == -1) { 4765 dev_err(&pdev->dev, "incorrect phy mode\n"); 4766 return -EINVAL; 4767 } 4768 4769 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 4770 if (id == -1) { 4771 dev_err(&pdev->dev, "missing port-id value\n"); 4772 return -EINVAL; 4773 } 4774 4775 #ifdef CONFIG_DM_GPIO 4776 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4777 &port->phy_reset_gpio, GPIOD_IS_OUT); 4778 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0, 4779 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4780 #endif 4781 4782 /* 4783 * ToDo: 4784 * Not sure if this DT property "phy-speed" will get accepted, so 4785 * this might change later 4786 */ 4787 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4788 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4789 "phy-speed", 1000); 4790 4791 port->id = id; 4792 if (port->priv->hw_version == MVPP21) 4793 port->first_rxq = port->id * rxq_number; 4794 else 4795 port->first_rxq = port->id * port->priv->max_port_rxqs; 4796 port->phy_node = phy_node; 4797 port->phy_interface = phy_mode; 4798 port->phyaddr = phyaddr; 4799 4800 return 0; 4801 } 4802 4803 #ifdef CONFIG_DM_GPIO 4804 /* Port GPIO initialization */ 4805 static void mvpp2_gpio_init(struct mvpp2_port *port) 4806 { 4807 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4808 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4809 udelay(1000); 4810 dm_gpio_set_value(&port->phy_reset_gpio, 1); 4811 } 4812 4813 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4814 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4815 } 4816 #endif 4817 4818 /* Ports initialization */ 4819 static int mvpp2_port_probe(struct udevice *dev, 4820 struct mvpp2_port *port, 4821 int port_node, 4822 struct mvpp2 *priv) 4823 { 4824 int err; 4825 4826 port->tx_ring_size = MVPP2_MAX_TXD; 4827 port->rx_ring_size = MVPP2_MAX_RXD; 4828 4829 err = mvpp2_port_init(dev, port); 4830 if (err < 0) { 4831 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4832 return err; 4833 } 4834 mvpp2_port_power_up(port); 4835 4836 #ifdef CONFIG_DM_GPIO 4837 mvpp2_gpio_init(port); 4838 #endif 4839 4840 priv->port_list[port->id] = port; 4841 priv->num_ports++; 4842 return 0; 4843 } 4844 4845 /* Initialize decoding windows */ 4846 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4847 struct mvpp2 *priv) 4848 { 4849 u32 win_enable; 4850 int i; 4851 4852 for (i = 0; i < 6; i++) { 4853 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4854 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4855 4856 if (i < 4) 4857 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4858 } 4859 4860 win_enable = 0; 4861 4862 for (i = 0; i < dram->num_cs; i++) { 4863 const struct mbus_dram_window *cs = dram->cs + i; 4864 4865 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4866 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4867 dram->mbus_dram_target_id); 4868 4869 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4870 (cs->size - 1) & 0xffff0000); 4871 4872 win_enable |=
(1 << i); 4873 } 4874 4875 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 4876 } 4877 4878 /* Initialize Rx FIFOs */ 4879 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 4880 { 4881 int port; 4882 4883 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4884 if (priv->hw_version == MVPP22) { 4885 if (port == 0) { 4886 mvpp2_write(priv, 4887 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4888 MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); 4889 mvpp2_write(priv, 4890 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4891 MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); 4892 } else if (port == 1) { 4893 mvpp2_write(priv, 4894 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4895 MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); 4896 mvpp2_write(priv, 4897 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4898 MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); 4899 } else { 4900 mvpp2_write(priv, 4901 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4902 MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); 4903 mvpp2_write(priv, 4904 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4905 MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); 4906 } 4907 } else { 4908 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4909 MVPP21_RX_FIFO_PORT_DATA_SIZE); 4910 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4911 MVPP21_RX_FIFO_PORT_ATTR_SIZE); 4912 } 4913 } 4914 4915 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 4916 MVPP2_RX_FIFO_PORT_MIN_PKT); 4917 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 4918 } 4919 4920 /* Initialize Tx FIFOs */ 4921 static void mvpp2_tx_fifo_init(struct mvpp2 *priv) 4922 { 4923 int port, val; 4924 4925 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4926 /* Port 0 supports 10KB TX FIFO */ 4927 if (port == 0) { 4928 val = MVPP2_TX_FIFO_DATA_SIZE_10KB & 4929 MVPP22_TX_FIFO_SIZE_MASK; 4930 } else { 4931 val = MVPP2_TX_FIFO_DATA_SIZE_3KB & 4932 MVPP22_TX_FIFO_SIZE_MASK; 4933 } 4934 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); 4935 } 4936 } 4937 4938 static void mvpp2_axi_init(struct mvpp2 *priv) 4939 { 4940 u32 val, rdval, wrval; 4941 4942 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 4943 4944 /* AXI Bridge Configuration */ 4945 4946 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 4947 << MVPP22_AXI_ATTR_CACHE_OFFS; 4948 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4949 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4950 4951 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 4952 << MVPP22_AXI_ATTR_CACHE_OFFS; 4953 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4954 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4955 4956 /* BM */ 4957 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 4958 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 4959 4960 /* Descriptors */ 4961 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 4962 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 4963 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 4964 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 4965 4966 /* Buffer Data */ 4967 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 4968 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 4969 4970 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 4971 << MVPP22_AXI_CODE_CACHE_OFFS; 4972 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 4973 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4974 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 4975 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 4976 4977 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 4978 << MVPP22_AXI_CODE_CACHE_OFFS; 4979 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4980 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4981 4982 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 4983 4984 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 4985 <<
MVPP22_AXI_CODE_CACHE_OFFS; 4986 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4987 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4988 4989 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 4990 } 4991 4992 /* Initialize network controller common part HW */ 4993 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 4994 { 4995 const struct mbus_dram_target_info *dram_target_info; 4996 int err, i; 4997 u32 val; 4998 4999 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 5000 if ((rxq_number > priv->max_port_rxqs) || 5001 (txq_number > MVPP2_MAX_TXQ)) { 5002 dev_err(&pdev->dev, "invalid queue size parameter\n"); 5003 return -EINVAL; 5004 } 5005 5006 if (priv->hw_version == MVPP22) 5007 mvpp2_axi_init(priv); 5008 else { 5009 /* MBUS windows configuration */ 5010 dram_target_info = mvebu_mbus_dram_info(); 5011 if (dram_target_info) 5012 mvpp2_conf_mbus_windows(dram_target_info, priv); 5013 } 5014 5015 if (priv->hw_version == MVPP21) { 5016 /* Disable HW PHY polling */ 5017 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5018 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 5019 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5020 } else { 5021 /* Enable HW PHY polling */ 5022 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5023 val |= MVPP22_SMI_POLLING_EN; 5024 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5025 } 5026 5027 /* Allocate and initialize aggregated TXQs */ 5028 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), 5029 sizeof(struct mvpp2_tx_queue), 5030 GFP_KERNEL); 5031 if (!priv->aggr_txqs) 5032 return -ENOMEM; 5033 5034 for_each_present_cpu(i) { 5035 priv->aggr_txqs[i].id = i; 5036 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 5037 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i], 5038 MVPP2_AGGR_TXQ_SIZE, i, priv); 5039 if (err < 0) 5040 return err; 5041 } 5042 5043 /* Rx Fifo Init */ 5044 mvpp2_rx_fifo_init(priv); 5045 5046 /* Tx Fifo Init */ 5047 if (priv->hw_version == MVPP22) 5048 mvpp2_tx_fifo_init(priv); 5049 5050 if (priv->hw_version == MVPP21) 5051 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 5052 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 5053 5054 /* Allow cache snoop when transmitting packets */ 5055 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 5056 5057 /* Buffer Manager initialization */ 5058 err = mvpp2_bm_init(dev, priv); 5059 if (err < 0) 5060 return err; 5061 5062 /* Parser default initialization */ 5063 err = mvpp2_prs_default_init(dev, priv); 5064 if (err < 0) 5065 return err; 5066 5067 /* Classifier default initialization */ 5068 mvpp2_cls_init(priv); 5069 5070 return 0; 5071 } 5072 5073 /* SMI / MDIO functions */ 5074 5075 static int smi_wait_ready(struct mvpp2 *priv) 5076 { 5077 u32 timeout = MVPP2_SMI_TIMEOUT; 5078 u32 smi_reg; 5079 5080 /* wait till the SMI is not busy */ 5081 do { 5082 /* read smi register */ 5083 smi_reg = readl(priv->mdio_base); 5084 if (timeout-- == 0) { 5085 printf("Error: SMI busy timeout\n"); 5086 return -EFAULT; 5087 } 5088 } while (smi_reg & MVPP2_SMI_BUSY); 5089 5090 return 0; 5091 } 5092 5093 /* 5094 * mpp2_mdio_read - miiphy_read callback function.
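 *
 * Registered as the .read hook of the MDIO bus created in
 * mvpp2_base_probe(), so phylib accesses and the U-Boot "mdio" shell
 * command both end up here.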
5095 * 5096 * Returns the 16-bit PHY register value, or -EFAULT on error 5097 */ 5098 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) 5099 { 5100 struct mvpp2 *priv = bus->priv; 5101 u32 smi_reg; 5102 u32 timeout; 5103 5104 /* check parameters */ 5105 if (addr > MVPP2_PHY_ADDR_MASK) { 5106 printf("Error: Invalid PHY address %d\n", addr); 5107 return -EFAULT; 5108 } 5109 5110 if (reg > MVPP2_PHY_REG_MASK) { 5111 printf("Err: Invalid register offset %d\n", reg); 5112 return -EFAULT; 5113 } 5114 5115 /* wait till the SMI is not busy */ 5116 if (smi_wait_ready(priv) < 0) 5117 return -EFAULT; 5118 5119 /* fill the phy address and register offset and read opcode */ 5120 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5121 | (reg << MVPP2_SMI_REG_ADDR_OFFS) 5122 | MVPP2_SMI_OPCODE_READ; 5123 5124 /* write the smi register */ 5125 writel(smi_reg, priv->mdio_base); 5126 5127 /* wait till read value is ready */ 5128 timeout = MVPP2_SMI_TIMEOUT; 5129 5130 do { 5131 /* read smi register */ 5132 smi_reg = readl(priv->mdio_base); 5133 if (timeout-- == 0) { 5134 printf("Err: SMI read ready timeout\n"); 5135 return -EFAULT; 5136 } 5137 } while (!(smi_reg & MVPP2_SMI_READ_VALID)); 5138 5139 /* Wait for the data to update in the SMI register */ 5140 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) 5141 ; 5142 5143 return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK; 5144 } 5145 5146 /* 5147 * mpp2_mdio_write - miiphy_write callback function. 5148 * 5149 * Returns 0 if the write succeeded, or -EFAULT on bad parameters 5150 * or timeout 5151 */ 5152 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, 5153 u16 value) 5154 { 5155 struct mvpp2 *priv = bus->priv; 5156 u32 smi_reg; 5157 5158 /* check parameters */ 5159 if (addr > MVPP2_PHY_ADDR_MASK) { 5160 printf("Error: Invalid PHY address %d\n", addr); 5161 return -EFAULT; 5162 } 5163 5164 if (reg > MVPP2_PHY_REG_MASK) { 5165 printf("Err: Invalid register offset %d\n", reg); 5166 return -EFAULT; 5167 } 5168 5169 /* wait till the SMI is not busy */ 5170 if (smi_wait_ready(priv) < 0) 5171 return -EFAULT; 5172 5173 /* fill the phy addr and reg offset and write opcode and data */ 5174 smi_reg = value << MVPP2_SMI_DATA_OFFS; 5175 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5176 | (reg << MVPP2_SMI_REG_ADDR_OFFS); 5177 smi_reg &= ~MVPP2_SMI_OPCODE_READ; 5178 5179 /* write the smi register */ 5180 writel(smi_reg, priv->mdio_base); 5181 5182 return 0; 5183 } 5184 5185 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) 5186 { 5187 struct mvpp2_port *port = dev_get_priv(dev); 5188 struct mvpp2_rx_desc *rx_desc; 5189 struct mvpp2_bm_pool *bm_pool; 5190 dma_addr_t dma_addr; 5191 u32 bm, rx_status; 5192 int pool, rx_bytes, err; 5193 int rx_received; 5194 struct mvpp2_rx_queue *rxq; 5195 u8 *data; 5196 5197 /* Process RX packets */ 5198 rxq = port->rxqs[0]; 5199 5200 /* Get number of received packets and clamp the to-do */ 5201 rx_received = mvpp2_rxq_received(port, rxq->id); 5202 5203 /* Return if no packets are received */ 5204 if (!rx_received) 5205 return 0; 5206 5207 rx_desc = mvpp2_rxq_next_desc_get(rxq); 5208 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 5209 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 5210 rx_bytes -= MVPP2_MH_SIZE; 5211 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 5212 5213 bm = mvpp2_bm_cookie_build(port, rx_desc); 5214 pool = mvpp2_bm_cookie_pool_get(bm); 5215 bm_pool = &port->priv->bm_pools[pool]; 5216 5217 /* In case of an error, release the requested
buffer pointer 5218 * to the Buffer Manager. This request process is controlled 5219 * by the hardware, and the information about the buffer is 5220 * carried in the RX descriptor. 5221 */ 5222 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 5223 mvpp2_rx_error(port, rx_desc); 5224 /* Return the buffer to the pool */ 5225 mvpp2_pool_refill(port, bm, dma_addr, dma_addr); 5226 return 0; 5227 } 5228 5229 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr); 5230 if (err) { 5231 netdev_err(port->dev, "failed to refill BM pools\n"); 5232 return 0; 5233 } 5234 5235 /* Update Rx queue management counters */ 5236 mb(); 5237 mvpp2_rxq_status_update(port, rxq->id, 1, 1); 5238 5239 /* give packet to stack - skip the first bytes: 2 (HW header) + 32 (cache padding) */ 5240 data = (u8 *)dma_addr + 2 + 32; 5241 5242 if (rx_bytes <= 0) 5243 return 0; 5244 5245 /* 5246 * No cache invalidation needed here, since the rx_buffers are 5247 * located in an uncached memory region 5248 */ 5249 *packetp = data; 5250 5251 return rx_bytes; 5252 } 5253 5254 static int mvpp2_send(struct udevice *dev, void *packet, int length) 5255 { 5256 struct mvpp2_port *port = dev_get_priv(dev); 5257 struct mvpp2_tx_queue *txq, *aggr_txq; 5258 struct mvpp2_tx_desc *tx_desc; 5259 int tx_done; 5260 int timeout; 5261 5262 txq = port->txqs[0]; 5263 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 5264 5265 /* Get a descriptor for the first part of the packet */ 5266 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 5267 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 5268 mvpp2_txdesc_size_set(port, tx_desc, length); 5269 mvpp2_txdesc_offset_set(port, tx_desc, 5270 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN); 5271 mvpp2_txdesc_dma_addr_set(port, tx_desc, 5272 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN); 5273 /* First and Last descriptor */ 5274 mvpp2_txdesc_cmd_set(port, tx_desc, 5275 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE 5276 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC); 5277 5278 /* Flush tx data */ 5279 flush_dcache_range((unsigned long)packet, 5280 (unsigned long)packet + ALIGN(length, PKTALIGN)); 5281 5282 /* Enable transmit */ 5283 mb(); 5284 mvpp2_aggr_txq_pend_desc_add(port, 1); 5285 5286 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5287 5288 timeout = 0; 5289 do { 5290 if (timeout++ > 10000) { 5291 printf("timeout: packet not sent from aggregated to phys TXQ\n"); 5292 return 0; 5293 } 5294 tx_done = mvpp2_txq_pend_desc_num_get(port, txq); 5295 } while (tx_done); 5296 5297 timeout = 0; 5298 do { 5299 if (timeout++ > 10000) { 5300 printf("timeout: packet not sent\n"); 5301 return 0; 5302 } 5303 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 5304 } while (!tx_done); 5305 5306 return 0; 5307 } 5308 5309 static int mvpp2_start(struct udevice *dev) 5310 { 5311 struct eth_pdata *pdata = dev_get_platdata(dev); 5312 struct mvpp2_port *port = dev_get_priv(dev); 5313 5314 /* Load current MAC address */ 5315 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN); 5316 5317 /* Reconfigure parser to accept the original MAC address */ 5318 mvpp2_prs_update_mac_da(port, port->dev_addr); 5319 5320 switch (port->phy_interface) { 5321 case PHY_INTERFACE_MODE_RGMII: 5322 case PHY_INTERFACE_MODE_RGMII_ID: 5323 case PHY_INTERFACE_MODE_SGMII: 5324 mvpp2_port_power_up(port); /* fall through */ 5325 default: 5326 break; 5327 } 5328 5329 mvpp2_open(dev, port); 5330 5331 return 0; 5332 } 5333 5334 static void mvpp2_stop(struct udevice *dev) 5335 { 5336 struct mvpp2_port *port = dev_get_priv(dev); 5337 5338 mvpp2_stop_dev(port); 5339 mvpp2_cleanup_rxqs(port); 5340 mvpp2_cleanup_txqs(port); 5341 } 5342 5343 static
int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port) 5344 { 5345 writel(port->phyaddr, port->priv->iface_base + 5346 MVPP22_SMI_PHY_ADDR_REG(port->gop_id)); 5347 5348 return 0; 5349 } 5350 5351 static int mvpp2_base_probe(struct udevice *dev) 5352 { 5353 struct mvpp2 *priv = dev_get_priv(dev); 5354 struct mii_dev *bus; 5355 void *bd_space; 5356 u32 size = 0; 5357 int i; 5358 5359 /* Save hw-version */ 5360 priv->hw_version = dev_get_driver_data(dev); 5361 5362 /* 5363 * U-Boot special buffer handling: 5364 * 5365 * Allocate the buffer area for descs and rx_buffers. This is done 5366 * only once for all interfaces, since only one interface can 5367 * be active at a time. Make this area DMA-safe by disabling the D-cache. 5368 */ 5369 5370 /* Align buffer area for descs and rx_buffers to 1MiB */ 5371 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 5372 mmu_set_region_dcache_behaviour((unsigned long)bd_space, 5373 BD_SPACE, DCACHE_OFF); 5374 5375 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; 5376 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; 5377 5378 buffer_loc.tx_descs = 5379 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); 5380 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; 5381 5382 buffer_loc.rx_descs = 5383 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); 5384 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; 5385 5386 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 5387 buffer_loc.bm_pool[i] = 5388 (unsigned long *)((unsigned long)bd_space + size); 5389 if (priv->hw_version == MVPP21) 5390 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32); 5391 else 5392 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64); 5393 } 5394 5395 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { 5396 buffer_loc.rx_buffer[i] = 5397 (unsigned long *)((unsigned long)bd_space + size); 5398 size += RX_BUFFER_SIZE; 5399 } 5400 5401 /* Clear the complete area so that all descriptors are cleared */ 5402 memset(bd_space, 0, size); 5403 5404 /* Save base addresses for later use */ 5405 priv->base = (void *)devfdt_get_addr_index(dev, 0); 5406 if (IS_ERR(priv->base)) 5407 return PTR_ERR(priv->base); 5408 5409 if (priv->hw_version == MVPP21) { 5410 priv->lms_base = (void *)devfdt_get_addr_index(dev, 1); 5411 if (IS_ERR(priv->lms_base)) 5412 return PTR_ERR(priv->lms_base); 5413 5414 priv->mdio_base = priv->lms_base + MVPP21_SMI; 5415 } else { 5416 priv->iface_base = (void *)devfdt_get_addr_index(dev, 1); 5417 if (IS_ERR(priv->iface_base)) 5418 return PTR_ERR(priv->iface_base); 5419 5420 priv->mdio_base = priv->iface_base + MVPP22_SMI; 5421 5422 /* Store common base addresses for all ports */ 5423 priv->mpcs_base = priv->iface_base + MVPP22_MPCS; 5424 priv->xpcs_base = priv->iface_base + MVPP22_XPCS; 5425 priv->rfu1_base = priv->iface_base + MVPP22_RFU1; 5426 } 5427 5428 if (priv->hw_version == MVPP21) 5429 priv->max_port_rxqs = 8; 5430 else 5431 priv->max_port_rxqs = 32; 5432 5433 /* Finally create and register the MDIO bus driver */ 5434 bus = mdio_alloc(); 5435 if (!bus) { 5436 printf("Failed to allocate MDIO bus\n"); 5437 return -ENOMEM; 5438 } 5439 5440 bus->read = mpp2_mdio_read; 5441 bus->write = mpp2_mdio_write; 5442 snprintf(bus->name, sizeof(bus->name), "%s", dev->name); 5443 bus->priv = (void *)priv; 5444 priv->bus = bus; 5445 5446 return mdio_register(bus); 5447 } 5448 5449 static int mvpp2_probe(struct udevice *dev) 5450 { 5451 struct mvpp2_port *port = dev_get_priv(dev); 5452 struct mvpp2 *priv = dev_get_priv(dev->parent); 5453 int err; 5454 5455 /* Only call the probe function for the parent once
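 * (the first UCLASS_ETH child to probe initializes the shared base
 * driver state; priv->probe_done keeps later children from repeating
 * it).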
*/ 5456 if (!priv->probe_done) { 5457 err = mvpp2_base_probe(dev->parent); if (err) return err; } 5458 5459 port->priv = dev_get_priv(dev->parent); 5460 5461 err = phy_info_parse(dev, port); 5462 if (err) 5463 return err; 5464 5465 /* 5466 * We need the port specific io base addresses at this stage, since 5467 * gop_port_init() accesses these registers 5468 */ 5469 if (priv->hw_version == MVPP21) { 5470 int priv_common_regs_num = 2; 5471 5472 port->base = (void __iomem *)devfdt_get_addr_index( 5473 dev->parent, priv_common_regs_num + port->id); 5474 if (IS_ERR(port->base)) 5475 return PTR_ERR(port->base); 5476 } else { 5477 port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), 5478 "gop-port-id", -1); 5479 if (port->gop_id == -1) { 5480 dev_err(&pdev->dev, "missing gop-port-id value\n"); 5481 return -EINVAL; 5482 } 5483 5484 port->base = priv->iface_base + MVPP22_PORT_BASE + 5485 port->gop_id * MVPP22_PORT_OFFSET; 5486 5487 /* Set phy address of the port */ 5488 if (port->phy_node) 5489 mvpp22_smi_phy_addr_cfg(port); 5490 5491 /* GoP Init */ 5492 gop_port_init(port); 5493 } 5494 5495 if (!priv->probe_done) { 5496 /* Initialize network controller */ 5497 err = mvpp2_init(dev, priv); 5498 if (err < 0) { 5499 dev_err(&pdev->dev, "failed to initialize controller\n"); 5500 return err; 5501 } 5502 priv->num_ports = 0; 5503 priv->probe_done = 1; 5504 } 5505 5506 err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv); 5507 if (err) 5508 return err; 5509 5510 if (priv->hw_version == MVPP22) { 5511 priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id, 5512 port->phy_interface); 5513 5514 /* Netcomplex configurations for all ports */ 5515 gop_netc_init(priv, MV_NETC_FIRST_PHASE); 5516 gop_netc_init(priv, MV_NETC_SECOND_PHASE); 5517 } 5518 5519 return 0; 5520 } 5521 5522 /* 5523 * Empty BM pool and stop its activity before the OS is started 5524 */ 5525 static int mvpp2_remove(struct udevice *dev) 5526 { 5527 struct mvpp2_port *port = dev_get_priv(dev); 5528 struct mvpp2 *priv = port->priv; 5529 int i; 5530 5531 priv->num_ports--; 5532 5533 if (priv->num_ports) 5534 return 0; 5535 5536 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) 5537 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 5538 5539 return 0; 5540 } 5541 5542 static const struct eth_ops mvpp2_ops = { 5543 .start = mvpp2_start, 5544 .send = mvpp2_send, 5545 .recv = mvpp2_recv, 5546 .stop = mvpp2_stop, 5547 }; 5548 5549 static struct driver mvpp2_driver = { 5550 .name = "mvpp2", 5551 .id = UCLASS_ETH, 5552 .probe = mvpp2_probe, 5553 .remove = mvpp2_remove, 5554 .ops = &mvpp2_ops, 5555 .priv_auto_alloc_size = sizeof(struct mvpp2_port), 5556 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 5557 .flags = DM_FLAG_ACTIVE_DMA, 5558 }; 5559 5560 /* 5561 * Use a MISC device to bind the n instances (child nodes) of the 5562 * network base controller in UCLASS_ETH.
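 *
 * The controller node itself binds as UCLASS_MISC (mvpp2_base below);
 * mvpp2_base_bind() walks its sub-nodes and binds one "mvpp2-<id>"
 * UCLASS_ETH child per enabled port.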
5563 */ 5564 static int mvpp2_base_bind(struct udevice *parent) 5565 { 5566 const void *blob = gd->fdt_blob; 5567 int node = dev_of_offset(parent); 5568 struct uclass_driver *drv; 5569 struct udevice *dev; 5570 struct eth_pdata *plat; 5571 char *name; 5572 int subnode; 5573 u32 id; 5574 int base_id_add; 5575 5576 /* Lookup eth driver */ 5577 drv = lists_uclass_lookup(UCLASS_ETH); 5578 if (!drv) { 5579 puts("Cannot find eth driver\n"); 5580 return -ENOENT; 5581 } 5582 5583 base_id_add = base_id; 5584 5585 fdt_for_each_subnode(subnode, blob, node) { 5586 /* Increment base_id for all subnodes, also the disabled ones */ 5587 base_id++; 5588 5589 /* Skip disabled ports */ 5590 if (!fdtdec_get_is_enabled(blob, subnode)) 5591 continue; 5592 5593 plat = calloc(1, sizeof(*plat)); 5594 if (!plat) 5595 return -ENOMEM; 5596 5597 id = fdtdec_get_int(blob, subnode, "port-id", -1); 5598 id += base_id_add; 5599 5600 name = calloc(1, 16); if (!name) { free(plat); return -ENOMEM; } 5601 sprintf(name, "mvpp2-%d", id); 5602 5603 /* Create child device UCLASS_ETH and bind it */ 5604 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 5605 dev_set_of_offset(dev, subnode); 5606 } 5607 5608 return 0; 5609 } 5610 5611 static const struct udevice_id mvpp2_ids[] = { 5612 { 5613 .compatible = "marvell,armada-375-pp2", 5614 .data = MVPP21, 5615 }, 5616 { 5617 .compatible = "marvell,armada-7k-pp22", 5618 .data = MVPP22, 5619 }, 5620 { } 5621 }; 5622 5623 U_BOOT_DRIVER(mvpp2_base) = { 5624 .name = "mvpp2_base", 5625 .id = UCLASS_MISC, 5626 .of_match = mvpp2_ids, 5627 .bind = mvpp2_base_bind, 5628 .priv_auto_alloc_size = sizeof(struct mvpp2), 5629 }; 5630
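/*
 * Example device-tree fragment matching the bind/probe logic above
 * (hypothetical and abridged; names and values are placeholders):
 *
 *	cpm_ethernet: ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *
 *		eth0: eth0 {
 *			port-id = <0>;
 *			gop-port-id = <0>;
 *			phy-mode = "sfi";
 *		};
 *
 *		eth1: eth1 {
 *			port-id = <1>;
 *			gop-port-id = <2>;
 *			phy = <&phy0>;
 *			phy-mode = "sgmii";
 *		};
 *	};
 */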