/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS	1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP		(2 + ETH_HLEN + 4 + 32)
#define MTU		1500
#define RX_BUFFER_SIZE	(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

#define MVPP2_SMI_TIMEOUT	10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)
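
/*
 * Illustrative sketch (not a register definition): the TCAM/SRAM data
 * registers above are indirect and are selected by first latching the
 * entry index. A minimal write sequence, mirroring mvpp2_prs_hw_write()
 * further below, looks roughly like:
 *
 *	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
 *	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
 *		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i),
 *			    pe->tcam.word[i]);
 */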

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
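
/*
 * Illustrative sketch: MVPP2_RXQ_DESC_ADDR_REG and
 * MVPP2_RXQ_DESC_SIZE_REG are banked per queue; the queue is selected
 * through MVPP2_RXQ_NUM_REG first, roughly (PPv2.2 additionally shifts
 * the address right by MVPP22_DESC_ADDR_OFFS):
 *
 *	mvpp2_write(priv, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_write(priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
 *	mvpp2_write(priv, MVPP2_RXQ_DESC_SIZE_REG,
 *		    rxq->size & MVPP2_RXQ_DESC_SIZE_MASK);
 */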

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
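
/*
 * Illustrative sketch: the MVPP2_TXP_SCHED_* registers above are
 * banked; the egress port must be selected through
 * MVPP2_TXP_SCHED_PORT_INDEX_REG before the other scheduler registers
 * are touched, roughly:
 *
 *	mvpp2_write(priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
 *		    mvpp2_egress_port(port));
 *	mvpp2_write(priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
 */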

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)
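
/*
 * Illustrative sketch: selecting the 10G MAC on GOP port 0 is a
 * read-modify-write of the MACMODESELECT field above, roughly:
 *
 *	u32 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
 *
 *	ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
 *	ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
 *	writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
 */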

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + (p))
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + (p))
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
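
/*
 * Worked example: with a 16-entry ring (last_desc == 15),
 * MVPP2_QUEUE_NEXT_DESC(q, 14) evaluates to 15 and
 * MVPP2_QUEUE_NEXT_DESC(q, 15) wraps back to 0.
 */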

/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200
#define MVPP2_PHY_REG_MASK			0x1f
/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS			0	/* Data */
#define MVPP2_SMI_DATA_MASK			(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS			16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS			21	/* PHY device reg addr */
#define MVPP2_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ			(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY				(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK			0x1f

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* TX FIFO minimum thresholds, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips them on its own.
 */
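
/*
 * Worked example: MVPP2_MH_SIZE (2) + ETH_HLEN (14) = 16 bytes, so an
 * IP header following the Ethernet header starts 4-byte aligned in any
 * buffer whose start is itself 4-byte aligned.
 */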
568 */ 569 #define MVPP2_MH_SIZE 2 570 #define MVPP2_ETH_TYPE_LEN 2 571 #define MVPP2_PPPOE_HDR_SIZE 8 572 #define MVPP2_VLAN_TAG_LEN 4 573 574 /* Lbtd 802.3 type */ 575 #define MVPP2_IP_LBDT_TYPE 0xfffa 576 577 #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 578 #define MVPP2_TX_CSUM_MAX_SIZE 9800 579 580 /* Timeout constants */ 581 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 582 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 583 584 #define MVPP2_TX_MTU_MAX 0x7ffff 585 586 /* Maximum number of T-CONTs of PON port */ 587 #define MVPP2_MAX_TCONT 16 588 589 /* Maximum number of supported ports */ 590 #define MVPP2_MAX_PORTS 4 591 592 /* Maximum number of TXQs used by single port */ 593 #define MVPP2_MAX_TXQ 8 594 595 /* Default number of TXQs in use */ 596 #define MVPP2_DEFAULT_TXQ 1 597 598 /* Dfault number of RXQs in use */ 599 #define MVPP2_DEFAULT_RXQ 1 600 #define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */ 601 602 /* Max number of Rx descriptors */ 603 #define MVPP2_MAX_RXD 16 604 605 /* Max number of Tx descriptors */ 606 #define MVPP2_MAX_TXD 16 607 608 /* Amount of Tx descriptors that can be reserved at once by CPU */ 609 #define MVPP2_CPU_DESC_CHUNK 16 610 611 /* Max number of Tx descriptors in each aggregated queue */ 612 #define MVPP2_AGGR_TXQ_SIZE 16 613 614 /* Descriptor aligned size */ 615 #define MVPP2_DESC_ALIGNED_SIZE 32 616 617 /* Descriptor alignment mask */ 618 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) 619 620 /* RX FIFO constants */ 621 #define MVPP21_RX_FIFO_PORT_DATA_SIZE 0x2000 622 #define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80 623 #define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000 624 #define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000 625 #define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000 626 #define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200 627 #define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80 628 #define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40 629 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 630 631 /* TX general registers */ 632 #define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2)) 633 #define MVPP22_TX_FIFO_SIZE_MASK 0xf 634 635 /* TX FIFO constants */ 636 #define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa 637 #define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3 638 639 /* RX buffer constants */ 640 #define MVPP2_SKB_SHINFO_SIZE \ 641 0 642 643 #define MVPP2_RX_PKT_SIZE(mtu) \ 644 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 645 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 646 647 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 648 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 649 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ 650 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) 651 652 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) 653 654 /* IPv6 max L3 address size */ 655 #define MVPP2_MAX_L3_ADDR_SIZE 16 656 657 /* Port flags */ 658 #define MVPP2_F_LOOPBACK BIT(0) 659 660 /* Marvell tag types */ 661 enum mvpp2_tag_type { 662 MVPP2_TAG_TYPE_NONE = 0, 663 MVPP2_TAG_TYPE_MH = 1, 664 MVPP2_TAG_TYPE_DSA = 2, 665 MVPP2_TAG_TYPE_EDSA = 3, 666 MVPP2_TAG_TYPE_VLAN = 4, 667 MVPP2_TAG_TYPE_LAST = 5 668 }; 669 670 /* Parser constants */ 671 #define MVPP2_PRS_TCAM_SRAM_SIZE 256 672 #define MVPP2_PRS_TCAM_WORDS 6 673 #define MVPP2_PRS_SRAM_WORDS 4 674 #define MVPP2_PRS_FLOW_ID_SIZE 64 675 #define MVPP2_PRS_FLOW_ID_MASK 0x3f 676 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 677 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) 678 #define MVPP2_PRS_IPV4_HEAD 0x40 679 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 680 #define MVPP2_PRS_IPV4_MC 0xe0 681 

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
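
/*
 * Worked example: each 32-bit TCAM word holds two data bytes in its low
 * half and their enable bytes in its high half, so for offs = 4 and 5,
 * MVPP2_PRS_TCAM_DATA_BYTE() yields byte[8] and byte[9], while
 * MVPP2_PRS_TCAM_DATA_BYTE_EN() yields byte[10] and byte[11].
 */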

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
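
/*
 * Worked example: the offsets above are bit positions inside the 4-word
 * (128-bit) SRAM entry; MVPP2_BIT_TO_BYTE() maps them into the byte
 * view, e.g. MVPP2_PRS_SRAM_AI_OFFS (90) lands in sram.byte[11], bit 2,
 * which is why mvpp2_prs_sram_ai_get() below reassembles the AI field
 * from two adjacent bytes.
 */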

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16 * 1024 - MVPP2_BM_POOL_PTR_ALIGN / 4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *mdio_base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
	struct mii_dev *bus;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
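
/*
 * Note: on PPv2.2 the 40-bit DMA address and the cookie share u64
 * descriptor words with other fields; the accessors below mask with
 * GENMASK_ULL(40, 0) to extract just the address/cookie part, e.g.:
 *
 *	dma = rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
 */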

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses few buffer descriptors, so 1MB of descriptor space
 * is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}
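
/*
 * Illustrative sketch of how the setters above combine when building a
 * single-fragment TX descriptor (the buffer address must respect
 * MVPP2_TX_DESC_ALIGN), roughly:
 *
 *	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
 *	mvpp2_txdesc_size_set(port, tx_desc, length);
 *	mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
 *	mvpp2_txdesc_dma_addr_set(port, tx_desc,
 *				  addr & ~MVPP2_TX_DESC_ALIGN);
 *	mvpp2_txdesc_cmd_set(port, tx_desc,
 *			     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
 */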

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
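
/*
 * Worked example: mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_L2_MCAST,
 * MVPP2_PRS_RI_L2_CAST_MASK) sets RI bit 9, clears RI bit 10, and sets
 * the two matching RI_CTRL valid bits, leaving all other result-info
 * bits untouched.
 */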

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
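
/*
 * Note: both mvpp2_prs_sram_shift_set() and mvpp2_prs_sram_offset_set()
 * store negative values as sign + magnitude, e.g. a shift of -4 is
 * written as magnitude 4 with MVPP2_PRS_SRAM_SHIFT_SIGN_BIT set.
 */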
(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1681 1682 /* Set offset type */ 1683 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, 1684 MVPP2_PRS_SRAM_UDF_TYPE_MASK); 1685 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); 1686 1687 /* Set offset operation */ 1688 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, 1689 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); 1690 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); 1691 1692 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1693 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= 1694 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> 1695 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1696 1697 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1698 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= 1699 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1700 1701 /* Set base offset as current */ 1702 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1703 } 1704 1705 /* Find parser flow entry */ 1706 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) 1707 { 1708 struct mvpp2_prs_entry *pe; 1709 int tid; 1710 1711 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 1712 if (!pe) 1713 return NULL; 1714 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 1715 1716 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ 1717 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { 1718 u8 bits; 1719 1720 if (!priv->prs_shadow[tid].valid || 1721 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) 1722 continue; 1723 1724 pe->index = tid; 1725 mvpp2_prs_hw_read(priv, pe); 1726 bits = mvpp2_prs_sram_ai_get(pe); 1727 1728 /* Sram store classification lookup ID in AI bits [5:0] */ 1729 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) 1730 return pe; 1731 } 1732 kfree(pe); 1733 1734 return NULL; 1735 } 1736 1737 /* Return first free tcam index, seeking from start to end */ 1738 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, 1739 unsigned char end) 1740 { 1741 int tid; 1742 1743 if (start > end) 1744 swap(start, end); 1745 1746 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) 1747 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; 1748 1749 for (tid = start; tid <= end; tid++) { 1750 if (!priv->prs_shadow[tid].valid) 1751 return tid; 1752 } 1753 1754 return -EINVAL; 1755 } 1756 1757 /* Enable/disable dropping all mac da's */ 1758 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) 1759 { 1760 struct mvpp2_prs_entry pe; 1761 1762 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { 1763 /* Entry exist - update port only */ 1764 pe.index = MVPP2_PE_DROP_ALL; 1765 mvpp2_prs_hw_read(priv, &pe); 1766 } else { 1767 /* Entry doesn't exist - create new */ 1768 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1769 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1770 pe.index = MVPP2_PE_DROP_ALL; 1771 1772 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1773 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1774 MVPP2_PRS_RI_DROP_MASK); 1775 1776 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1777 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1778 1779 /* Update shadow table */ 1780 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1781 1782 /* Mask all ports */ 1783 mvpp2_prs_tcam_port_map_set(&pe, 0); 1784 } 1785 1786 /* Update port mask */ 1787 mvpp2_prs_tcam_port_set(&pe, port, add); 1788 1789 mvpp2_prs_hw_write(priv, &pe); 1790 } 1791 1792 /* Set port to promiscuous mode */ 1793 static void mvpp2_prs_mac_promisc_set(struct mvpp2 
*priv, int port, bool add) 1794 { 1795 struct mvpp2_prs_entry pe; 1796 1797 /* Promiscuous mode - Accept unknown packets */ 1798 1799 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { 1800 /* Entry exist - update port only */ 1801 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1802 mvpp2_prs_hw_read(priv, &pe); 1803 } else { 1804 /* Entry doesn't exist - create new */ 1805 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1806 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1807 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1808 1809 /* Continue - set next lookup */ 1810 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1811 1812 /* Set result info bits */ 1813 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, 1814 MVPP2_PRS_RI_L2_CAST_MASK); 1815 1816 /* Shift to ethertype */ 1817 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1818 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1819 1820 /* Mask all ports */ 1821 mvpp2_prs_tcam_port_map_set(&pe, 0); 1822 1823 /* Update shadow table */ 1824 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1825 } 1826 1827 /* Update port mask */ 1828 mvpp2_prs_tcam_port_set(&pe, port, add); 1829 1830 mvpp2_prs_hw_write(priv, &pe); 1831 } 1832 1833 /* Accept multicast */ 1834 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, 1835 bool add) 1836 { 1837 struct mvpp2_prs_entry pe; 1838 unsigned char da_mc; 1839 1840 /* Ethernet multicast address first byte is 1841 * 0x01 for IPv4 and 0x33 for IPv6 1842 */ 1843 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; 1844 1845 if (priv->prs_shadow[index].valid) { 1846 /* Entry exist - update port only */ 1847 pe.index = index; 1848 mvpp2_prs_hw_read(priv, &pe); 1849 } else { 1850 /* Entry doesn't exist - create new */ 1851 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1852 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1853 pe.index = index; 1854 1855 /* Continue - set next lookup */ 1856 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1857 1858 /* Set result info bits */ 1859 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, 1860 MVPP2_PRS_RI_L2_CAST_MASK); 1861 1862 /* Update tcam entry data first byte */ 1863 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); 1864 1865 /* Shift to ethertype */ 1866 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1867 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1868 1869 /* Mask all ports */ 1870 mvpp2_prs_tcam_port_map_set(&pe, 0); 1871 1872 /* Update shadow table */ 1873 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1874 } 1875 1876 /* Update port mask */ 1877 mvpp2_prs_tcam_port_set(&pe, port, add); 1878 1879 mvpp2_prs_hw_write(priv, &pe); 1880 } 1881 1882 /* Parser per-port initialization */ 1883 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, 1884 int lu_max, int offset) 1885 { 1886 u32 val; 1887 1888 /* Set lookup ID */ 1889 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); 1890 val &= ~MVPP2_PRS_PORT_LU_MASK(port); 1891 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); 1892 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); 1893 1894 /* Set maximum number of loops for packet received from port */ 1895 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); 1896 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); 1897 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); 1898 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); 1899 1900 /* Set initial offset for packet header extraction for the first 1901 * searching loop 1902 */ 1903 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); 1904 val &= ~MVPP2_PRS_INIT_OFF_MASK(port); 1905 val |= 
MVPP2_PRS_INIT_OFF_VAL(port, offset); 1906 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); 1907 } 1908 1909 /* Default flow entries initialization for all ports */ 1910 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) 1911 { 1912 struct mvpp2_prs_entry pe; 1913 int port; 1914 1915 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 1916 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1917 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1918 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; 1919 1920 /* Mask all ports */ 1921 mvpp2_prs_tcam_port_map_set(&pe, 0); 1922 1923 /* Set flow ID*/ 1924 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); 1925 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 1926 1927 /* Update shadow table and hw entry */ 1928 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); 1929 mvpp2_prs_hw_write(priv, &pe); 1930 } 1931 } 1932 1933 /* Set default entry for Marvell Header field */ 1934 static void mvpp2_prs_mh_init(struct mvpp2 *priv) 1935 { 1936 struct mvpp2_prs_entry pe; 1937 1938 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1939 1940 pe.index = MVPP2_PE_MH_DEFAULT; 1941 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); 1942 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, 1943 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1944 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); 1945 1946 /* Unmask all ports */ 1947 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1948 1949 /* Update shadow table and hw entry */ 1950 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); 1951 mvpp2_prs_hw_write(priv, &pe); 1952 } 1953 1954 /* Set default entires (place holder) for promiscuous, non-promiscuous and 1955 * multicast MAC addresses 1956 */ 1957 static void mvpp2_prs_mac_init(struct mvpp2 *priv) 1958 { 1959 struct mvpp2_prs_entry pe; 1960 1961 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1962 1963 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1964 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; 1965 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1966 1967 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1968 MVPP2_PRS_RI_DROP_MASK); 1969 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1970 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1971 1972 /* Unmask all ports */ 1973 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1974 1975 /* Update shadow table and hw entry */ 1976 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1977 mvpp2_prs_hw_write(priv, &pe); 1978 1979 /* place holders only - no ports */ 1980 mvpp2_prs_mac_drop_all_set(priv, 0, false); 1981 mvpp2_prs_mac_promisc_set(priv, 0, false); 1982 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 1983 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 1984 } 1985 1986 /* Match basic ethertypes */ 1987 static int mvpp2_prs_etype_init(struct mvpp2 *priv) 1988 { 1989 struct mvpp2_prs_entry pe; 1990 int tid; 1991 1992 /* Ethertype: PPPoE */ 1993 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1994 MVPP2_PE_LAST_FREE_TID); 1995 if (tid < 0) 1996 return tid; 1997 1998 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1999 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2000 pe.index = tid; 2001 2002 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES); 2003 2004 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, 2005 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2006 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 2007 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, 2008 MVPP2_PRS_RI_PPPOE_MASK); 2009 2010 /* Update shadow 
table and hw entry */ 2011 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2012 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2013 priv->prs_shadow[pe.index].finish = false; 2014 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, 2015 MVPP2_PRS_RI_PPPOE_MASK); 2016 mvpp2_prs_hw_write(priv, &pe); 2017 2018 /* Ethertype: ARP */ 2019 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2020 MVPP2_PE_LAST_FREE_TID); 2021 if (tid < 0) 2022 return tid; 2023 2024 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2025 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2026 pe.index = tid; 2027 2028 mvpp2_prs_match_etype(&pe, 0, PROT_ARP); 2029 2030 /* Generate flow in the next iteration*/ 2031 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2032 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2033 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, 2034 MVPP2_PRS_RI_L3_PROTO_MASK); 2035 /* Set L3 offset */ 2036 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2037 MVPP2_ETH_TYPE_LEN, 2038 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2039 2040 /* Update shadow table and hw entry */ 2041 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2042 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2043 priv->prs_shadow[pe.index].finish = true; 2044 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, 2045 MVPP2_PRS_RI_L3_PROTO_MASK); 2046 mvpp2_prs_hw_write(priv, &pe); 2047 2048 /* Ethertype: LBTD */ 2049 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2050 MVPP2_PE_LAST_FREE_TID); 2051 if (tid < 0) 2052 return tid; 2053 2054 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2055 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2056 pe.index = tid; 2057 2058 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); 2059 2060 /* Generate flow in the next iteration*/ 2061 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2062 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2063 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2064 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2065 MVPP2_PRS_RI_CPU_CODE_MASK | 2066 MVPP2_PRS_RI_UDF3_MASK); 2067 /* Set L3 offset */ 2068 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2069 MVPP2_ETH_TYPE_LEN, 2070 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2071 2072 /* Update shadow table and hw entry */ 2073 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2074 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2075 priv->prs_shadow[pe.index].finish = true; 2076 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2077 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2078 MVPP2_PRS_RI_CPU_CODE_MASK | 2079 MVPP2_PRS_RI_UDF3_MASK); 2080 mvpp2_prs_hw_write(priv, &pe); 2081 2082 /* Ethertype: IPv4 without options */ 2083 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2084 MVPP2_PE_LAST_FREE_TID); 2085 if (tid < 0) 2086 return tid; 2087 2088 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2089 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2090 pe.index = tid; 2091 2092 mvpp2_prs_match_etype(&pe, 0, PROT_IP); 2093 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2094 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 2095 MVPP2_PRS_IPV4_HEAD_MASK | 2096 MVPP2_PRS_IPV4_IHL_MASK); 2097 2098 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 2099 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 2100 MVPP2_PRS_RI_L3_PROTO_MASK); 2101 /* Skip eth_type + 4 bytes of IP header */ 2102 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 2103 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2104 /* Set L3 
offset */ 2105 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2106 MVPP2_ETH_TYPE_LEN, 2107 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2108 2109 /* Update shadow table and hw entry */ 2110 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2111 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2112 priv->prs_shadow[pe.index].finish = false; 2113 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, 2114 MVPP2_PRS_RI_L3_PROTO_MASK); 2115 mvpp2_prs_hw_write(priv, &pe); 2116 2117 /* Ethertype: IPv4 with options */ 2118 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2119 MVPP2_PE_LAST_FREE_TID); 2120 if (tid < 0) 2121 return tid; 2122 2123 pe.index = tid; 2124 2125 /* Clear tcam data before updating */ 2126 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 2127 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 2128 2129 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2130 MVPP2_PRS_IPV4_HEAD, 2131 MVPP2_PRS_IPV4_HEAD_MASK); 2132 2133 /* Clear ri before updating */ 2134 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 2135 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2136 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 2137 MVPP2_PRS_RI_L3_PROTO_MASK); 2138 2139 /* Update shadow table and hw entry */ 2140 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2141 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2142 priv->prs_shadow[pe.index].finish = false; 2143 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, 2144 MVPP2_PRS_RI_L3_PROTO_MASK); 2145 mvpp2_prs_hw_write(priv, &pe); 2146 2147 /* Ethertype: IPv6 without options */ 2148 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2149 MVPP2_PE_LAST_FREE_TID); 2150 if (tid < 0) 2151 return tid; 2152 2153 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2154 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2155 pe.index = tid; 2156 2157 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6); 2158 2159 /* Skip DIP of IPV6 header */ 2160 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + 2161 MVPP2_MAX_L3_ADDR_SIZE, 2162 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2163 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 2164 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 2165 MVPP2_PRS_RI_L3_PROTO_MASK); 2166 /* Set L3 offset */ 2167 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2168 MVPP2_ETH_TYPE_LEN, 2169 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2170 2171 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2172 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2173 priv->prs_shadow[pe.index].finish = false; 2174 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, 2175 MVPP2_PRS_RI_L3_PROTO_MASK); 2176 mvpp2_prs_hw_write(priv, &pe); 2177 2178 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 2179 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2180 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2181 pe.index = MVPP2_PE_ETH_TYPE_UN; 2182 2183 /* Unmask all ports */ 2184 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2185 2186 /* Generate flow in the next iteration*/ 2187 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2188 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2189 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 2190 MVPP2_PRS_RI_L3_PROTO_MASK); 2191 /* Set L3 offset even it's unknown L3 */ 2192 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2193 MVPP2_ETH_TYPE_LEN, 2194 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2195 2196 /* Update shadow table and hw entry */ 2197 
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2198 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2199 priv->prs_shadow[pe.index].finish = true; 2200 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, 2201 MVPP2_PRS_RI_L3_PROTO_MASK); 2202 mvpp2_prs_hw_write(priv, &pe); 2203 2204 return 0; 2205 } 2206 2207 /* Parser default initialization */ 2208 static int mvpp2_prs_default_init(struct udevice *dev, 2209 struct mvpp2 *priv) 2210 { 2211 int err, index, i; 2212 2213 /* Enable tcam table */ 2214 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 2215 2216 /* Clear all tcam and sram entries */ 2217 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { 2218 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 2219 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 2220 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 2221 2222 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); 2223 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 2224 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 2225 } 2226 2227 /* Invalidate all tcam entries */ 2228 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) 2229 mvpp2_prs_hw_inv(priv, index); 2230 2231 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE, 2232 sizeof(struct mvpp2_prs_shadow), 2233 GFP_KERNEL); 2234 if (!priv->prs_shadow) 2235 return -ENOMEM; 2236 2237 /* Always start from lookup = 0 */ 2238 for (index = 0; index < MVPP2_MAX_PORTS; index++) 2239 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, 2240 MVPP2_PRS_PORT_LU_MAX, 0); 2241 2242 mvpp2_prs_def_flow_init(priv); 2243 2244 mvpp2_prs_mh_init(priv); 2245 2246 mvpp2_prs_mac_init(priv); 2247 2248 err = mvpp2_prs_etype_init(priv); 2249 if (err) 2250 return err; 2251 2252 return 0; 2253 } 2254 2255 /* Compare MAC DA with tcam entry data */ 2256 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, 2257 const u8 *da, unsigned char *mask) 2258 { 2259 unsigned char tcam_byte, tcam_mask; 2260 int index; 2261 2262 for (index = 0; index < ETH_ALEN; index++) { 2263 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); 2264 if (tcam_mask != mask[index]) 2265 return false; 2266 2267 if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) 2268 return false; 2269 } 2270 2271 return true; 2272 } 2273 2274 /* Find tcam entry with matched pair <MAC DA, port> */ 2275 static struct mvpp2_prs_entry * 2276 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, 2277 unsigned char *mask, int udf_type) 2278 { 2279 struct mvpp2_prs_entry *pe; 2280 int tid; 2281 2282 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2283 if (!pe) 2284 return NULL; 2285 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2286 2287 /* Go through the all entires with MVPP2_PRS_LU_MAC */ 2288 for (tid = MVPP2_PE_FIRST_FREE_TID; 2289 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2290 unsigned int entry_pmap; 2291 2292 if (!priv->prs_shadow[tid].valid || 2293 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 2294 (priv->prs_shadow[tid].udf != udf_type)) 2295 continue; 2296 2297 pe->index = tid; 2298 mvpp2_prs_hw_read(priv, pe); 2299 entry_pmap = mvpp2_prs_tcam_port_map_get(pe); 2300 2301 if (mvpp2_prs_mac_range_equals(pe, da, mask) && 2302 entry_pmap == pmap) 2303 return pe; 2304 } 2305 kfree(pe); 2306 2307 return NULL; 2308 } 2309 2310 /* Update parser's mac da entry */ 2311 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 2312 const u8 *da, bool add) 2313 { 2314 struct mvpp2_prs_entry *pe; 2315 unsigned int pmap, len, ri; 2316 unsigned char mask[ETH_ALEN] 
= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}
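/*
 * Illustrative sketch (not part of the driver): a caller that reprograms
 * the unicast filter when U-Boot changes the interface MAC address would
 * go through mvpp2_prs_update_mac_da(); the address value below is
 * hypothetical:
 *
 *	u8 new_mac[ETH_ALEN] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
 *
 *	err = mvpp2_prs_update_mac_da(port, new_mac);
 *	if (err)
 *		printf("failed to update parser MAC DA: %d\n", err);
 *
 * The helper first removes the TCAM entry for the old <MAC DA, port>
 * pair and only then installs one for the new address, so two unicast
 * "MAC to me" entries never coexist for the same port.
 */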
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
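/*
 * Note on lookup table addressing (derived from the code above): the
 * lookup ID table is indexed by the pair <way, lkpid>, packed into a
 * single index register write:
 *
 *	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
 *
 * With MVPP2_CLS_LKP_INDEX_WAY_OFFS = 6, way 1 / lkpid 3 selects index
 * 0x43, for example. mvpp2_cls_init() clears both ways for every lkpid;
 * mvpp2_cls_port_config() below then fills in way 0 for each port.
 */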
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Allocate buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}
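/*
 * Note on the drain loop above: the read of MVPP2_BM_PHY_ALLOC_REG is
 * not a no-op. Each read appears to pop one buffer pointer from the
 * pool's hardware FIFO (the "allocate buffer back" in the comment), so
 * reading it buf_num times empties the pool before
 * mvpp2_bm_pool_destroy() below stops it with MVPP2_BM_STOP_MASK.
 */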
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}

static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}
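/*
 * BM cookie layout sketch (as implied by the helpers above): the pool
 * number occupies 8 bits starting at MVPP2_BM_COOKIE_POOL_OFFS, so a
 * set/get round trip is the identity for any pool id that fits the
 * 8-bit field:
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	assert(mvpp2_bm_cookie_pool_get(bm) == 3);
 *
 * On RX this cookie comes back in the descriptor, and
 * mvpp2_pool_refill() below uses it to return the buffer to the pool it
 * was allocated from.
 */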
/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}
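/*
 * Note: mvpp2_bm_bufs_add() deliberately reports a short count instead
 * of an error code; callers such as mvpp2_bm_pool_use() below compare
 * the returned count against the requested one. In this U-Boot port the
 * "allocation" appears to be just handing the pre-carved
 * buffer_loc.rx_buffer[] entries to the BM, so a mismatch points at a
 * sizing bug rather than at running out of memory.
 */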
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
				   new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through - RGMII also clears the PCS enable bit */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
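/*
 * Note on the helper below: judging from the arithmetic, the GMAC
 * maximum RX size field appears to be expressed in units of two bytes,
 * hence the division by two once the Marvell header size has been
 * subtracted from pkt_size.
 */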
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure the port to work with the Gig PCS or not.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base +
MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3085 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3086 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); 3087 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3088 3089 /* Disable bypass of sync module */ 3090 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); 3091 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; 3092 /* configure DP clock select according to mode */ 3093 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; 3094 /* configure QSGMII bypass according to mode */ 3095 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 3096 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); 3097 3098 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3099 /* configure GIG MAC to SGMII mode */ 3100 val &= ~MVPP2_GMAC_PORT_TYPE_MASK; 3101 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3102 3103 /* configure AN */ 3104 val = MVPP2_GMAC_EN_PCS_AN | 3105 MVPP2_GMAC_AN_BYPASS_EN | 3106 MVPP2_GMAC_AN_SPEED_EN | 3107 MVPP2_GMAC_EN_FC_AN | 3108 MVPP2_GMAC_AN_DUPLEX_EN | 3109 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; 3110 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3111 } 3112 3113 static void gop_gmac_rgmii_cfg(struct mvpp2_port *port) 3114 { 3115 u32 val, thresh; 3116 3117 /* 3118 * Configure minimal level of the Tx FIFO before the lower part 3119 * starts to read a packet 3120 */ 3121 thresh = MVPP2_RGMII_TX_FIFO_MIN_TH; 3122 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3123 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3124 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); 3125 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3126 3127 /* Disable bypass of sync module */ 3128 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); 3129 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; 3130 /* configure DP clock select according to mode */ 3131 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; 3132 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 3133 val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK; 3134 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); 3135 3136 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3137 /* configure GIG MAC to SGMII mode */ 3138 val &= ~MVPP2_GMAC_PORT_TYPE_MASK; 3139 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3140 3141 /* configure AN 0xb8e8 */ 3142 val = MVPP2_GMAC_AN_BYPASS_EN | 3143 MVPP2_GMAC_AN_SPEED_EN | 3144 MVPP2_GMAC_EN_FC_AN | 3145 MVPP2_GMAC_AN_DUPLEX_EN | 3146 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; 3147 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3148 } 3149 3150 /* Set the internal mux's to the required MAC in the GOP */ 3151 static int gop_gmac_mode_cfg(struct mvpp2_port *port) 3152 { 3153 u32 val; 3154 3155 /* Set TX FIFO thresholds */ 3156 switch (port->phy_interface) { 3157 case PHY_INTERFACE_MODE_SGMII: 3158 if (port->phy_speed == 2500) 3159 gop_gmac_sgmii2_5_cfg(port); 3160 else 3161 gop_gmac_sgmii_cfg(port); 3162 break; 3163 3164 case PHY_INTERFACE_MODE_RGMII: 3165 case PHY_INTERFACE_MODE_RGMII_ID: 3166 gop_gmac_rgmii_cfg(port); 3167 break; 3168 3169 default: 3170 return -1; 3171 } 3172 3173 /* Jumbo frame support - 0x1400*2= 0x2800 bytes */ 3174 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3175 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 3176 val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS; 3177 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3178 3179 /* PeriodicXonEn disable */ 3180 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 3181 val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 3182 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 3183 3184 return 0; 3185 } 3186 3187 static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port) 3188 { 
3189 u32 val; 3190 3191 /* relevant only for MAC0 (XLG0 and GMAC0) */ 3192 if (port->gop_id > 0) 3193 return; 3194 3195 /* configure 1Gig MAC mode */ 3196 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3197 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3198 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 3199 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3200 } 3201 3202 static int gop_gpcs_reset(struct mvpp2_port *port, int reset) 3203 { 3204 u32 val; 3205 3206 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 3207 if (reset) 3208 val &= ~MVPP2_GMAC_SGMII_MODE_MASK; 3209 else 3210 val |= MVPP2_GMAC_SGMII_MODE_MASK; 3211 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 3212 3213 return 0; 3214 } 3215 3216 /* Set the internal mux's to the required PCS in the PI */ 3217 static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes) 3218 { 3219 u32 val; 3220 int lane; 3221 3222 switch (num_of_lanes) { 3223 case 1: 3224 lane = 0; 3225 break; 3226 case 2: 3227 lane = 1; 3228 break; 3229 case 4: 3230 lane = 2; 3231 break; 3232 default: 3233 return -1; 3234 } 3235 3236 /* configure XG MAC mode */ 3237 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3238 val &= ~MVPP22_XPCS_PCSMODE_MASK; 3239 val &= ~MVPP22_XPCS_LANEACTIVE_MASK; 3240 val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS; 3241 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3242 3243 return 0; 3244 } 3245 3246 static int gop_mpcs_mode(struct mvpp2_port *port) 3247 { 3248 u32 val; 3249 3250 /* configure PCS40G COMMON CONTROL */ 3251 val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3252 val &= ~FORWARD_ERROR_CORRECTION_MASK; 3253 writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3254 3255 /* configure PCS CLOCK RESET */ 3256 val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET); 3257 val &= ~CLK_DIVISION_RATIO_MASK; 3258 val |= 1 << CLK_DIVISION_RATIO_OFFS; 3259 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3260 3261 val &= ~CLK_DIV_PHASE_SET_MASK; 3262 val |= MAC_CLK_RESET_MASK; 3263 val |= RX_SD_CLK_RESET_MASK; 3264 val |= TX_SD_CLK_RESET_MASK; 3265 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3266 3267 return 0; 3268 } 3269 3270 /* Set the internal mux's to the required MAC in the GOP */ 3271 static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes) 3272 { 3273 u32 val; 3274 3275 /* configure 10G MAC mode */ 3276 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3277 val |= MVPP22_XLG_RX_FC_EN; 3278 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3279 3280 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3281 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3282 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC; 3283 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3284 3285 /* read - modify - write */ 3286 val = readl(port->base + MVPP22_XLG_CTRL4_REG); 3287 val &= ~MVPP22_XLG_MODE_DMA_1G; 3288 val |= MVPP22_XLG_FORWARD_PFC_EN; 3289 val |= MVPP22_XLG_FORWARD_802_3X_FC_EN; 3290 val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK; 3291 writel(val, port->base + MVPP22_XLG_CTRL4_REG); 3292 3293 /* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */ 3294 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 3295 val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK; 3296 val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS; 3297 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 3298 3299 /* unmask link change interrupt */ 3300 val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG); 3301 val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE; 3302 val |= 1; /* unmask summary bit */ 3303 writel(val, port->base + 
MVPP22_XLG_INTERRUPT_MASK_REG); 3304 3305 return 0; 3306 } 3307 3308 /* Set PCS to reset or exit from reset */ 3309 static int gop_xpcs_reset(struct mvpp2_port *port, int reset) 3310 { 3311 u32 val; 3312 3313 /* read - modify - write */ 3314 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3315 if (reset) 3316 val &= ~MVPP22_XPCS_PCSRESET; 3317 else 3318 val |= MVPP22_XPCS_PCSRESET; 3319 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3320 3321 return 0; 3322 } 3323 3324 /* Set the MAC to reset or exit from reset */ 3325 static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset) 3326 { 3327 u32 val; 3328 3329 /* read - modify - write */ 3330 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3331 if (reset) 3332 val &= ~MVPP22_XLG_MAC_RESETN; 3333 else 3334 val |= MVPP22_XLG_MAC_RESETN; 3335 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3336 3337 return 0; 3338 } 3339 3340 /* 3341 * gop_port_init 3342 * 3343 * Init physical port. Configures the port mode and all it's elements 3344 * accordingly. 3345 * Does not verify that the selected mode/port number is valid at the 3346 * core level. 3347 */ 3348 static int gop_port_init(struct mvpp2_port *port) 3349 { 3350 int mac_num = port->gop_id; 3351 int num_of_act_lanes; 3352 3353 if (mac_num >= MVPP22_GOP_MAC_NUM) { 3354 netdev_err(NULL, "%s: illegal port number %d", __func__, 3355 mac_num); 3356 return -1; 3357 } 3358 3359 switch (port->phy_interface) { 3360 case PHY_INTERFACE_MODE_RGMII: 3361 case PHY_INTERFACE_MODE_RGMII_ID: 3362 gop_gmac_reset(port, 1); 3363 3364 /* configure PCS */ 3365 gop_gpcs_mode_cfg(port, 0); 3366 gop_bypass_clk_cfg(port, 1); 3367 3368 /* configure MAC */ 3369 gop_gmac_mode_cfg(port); 3370 /* pcs unreset */ 3371 gop_gpcs_reset(port, 0); 3372 3373 /* mac unreset */ 3374 gop_gmac_reset(port, 0); 3375 break; 3376 3377 case PHY_INTERFACE_MODE_SGMII: 3378 /* configure PCS */ 3379 gop_gpcs_mode_cfg(port, 1); 3380 3381 /* configure MAC */ 3382 gop_gmac_mode_cfg(port); 3383 /* select proper Mac mode */ 3384 gop_xlg_2_gig_mac_cfg(port); 3385 3386 /* pcs unreset */ 3387 gop_gpcs_reset(port, 0); 3388 /* mac unreset */ 3389 gop_gmac_reset(port, 0); 3390 break; 3391 3392 case PHY_INTERFACE_MODE_SFI: 3393 num_of_act_lanes = 2; 3394 mac_num = 0; 3395 /* configure PCS */ 3396 gop_xpcs_mode(port, num_of_act_lanes); 3397 gop_mpcs_mode(port); 3398 /* configure MAC */ 3399 gop_xlg_mac_mode_cfg(port, num_of_act_lanes); 3400 3401 /* pcs unreset */ 3402 gop_xpcs_reset(port, 0); 3403 3404 /* mac unreset */ 3405 gop_xlg_mac_reset(port, 0); 3406 break; 3407 3408 default: 3409 netdev_err(NULL, "%s: Requested port mode (%d) not supported\n", 3410 __func__, port->phy_interface); 3411 return -1; 3412 } 3413 3414 return 0; 3415 } 3416 3417 static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable) 3418 { 3419 u32 val; 3420 3421 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3422 if (enable) { 3423 /* Enable port and MIB counters update */ 3424 val |= MVPP22_XLG_PORT_EN; 3425 val &= ~MVPP22_XLG_MIBCNT_DIS; 3426 } else { 3427 /* Disable port */ 3428 val &= ~MVPP22_XLG_PORT_EN; 3429 } 3430 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3431 } 3432 3433 static void gop_port_enable(struct mvpp2_port *port, int enable) 3434 { 3435 switch (port->phy_interface) { 3436 case PHY_INTERFACE_MODE_RGMII: 3437 case PHY_INTERFACE_MODE_RGMII_ID: 3438 case PHY_INTERFACE_MODE_SGMII: 3439 if (enable) 3440 mvpp2_port_enable(port); 3441 else 3442 mvpp2_port_disable(port); 3443 break; 3444 3445 case 
PHY_INTERFACE_MODE_SFI: 3446 gop_xlg_mac_port_enable(port, enable); 3447 3448 break; 3449 default: 3450 netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__, 3451 port->phy_interface); 3452 return; 3453 } 3454 } 3455 3456 /* RFU1 functions */ 3457 static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset) 3458 { 3459 return readl(priv->rfu1_base + offset); 3460 } 3461 3462 static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data) 3463 { 3464 writel(data, priv->rfu1_base + offset); 3465 } 3466 3467 static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type) 3468 { 3469 u32 val = 0; 3470 3471 if (gop_id == 2) { 3472 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3473 val |= MV_NETC_GE_MAC2_SGMII; 3474 } 3475 3476 if (gop_id == 3) { 3477 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3478 val |= MV_NETC_GE_MAC3_SGMII; 3479 else if (phy_type == PHY_INTERFACE_MODE_RGMII || 3480 phy_type == PHY_INTERFACE_MODE_RGMII_ID) 3481 val |= MV_NETC_GE_MAC3_RGMII; 3482 } 3483 3484 return val; 3485 } 3486 3487 static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val) 3488 { 3489 u32 reg; 3490 3491 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3492 reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id)); 3493 3494 val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id); 3495 val &= NETC_PORTS_ACTIVE_MASK(gop_id); 3496 3497 reg |= val; 3498 3499 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3500 } 3501 3502 static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val) 3503 { 3504 u32 reg; 3505 3506 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3507 reg &= ~NETC_GBE_PORT1_MII_MODE_MASK; 3508 3509 val <<= NETC_GBE_PORT1_MII_MODE_OFFS; 3510 val &= NETC_GBE_PORT1_MII_MODE_MASK; 3511 3512 reg |= val; 3513 3514 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3515 } 3516 3517 static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val) 3518 { 3519 u32 reg; 3520 3521 reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG); 3522 reg &= ~NETC_GOP_SOFT_RESET_MASK; 3523 3524 val <<= NETC_GOP_SOFT_RESET_OFFS; 3525 val &= NETC_GOP_SOFT_RESET_MASK; 3526 3527 reg |= val; 3528 3529 gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg); 3530 } 3531 3532 static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val) 3533 { 3534 u32 reg; 3535 3536 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3537 reg &= ~NETC_CLK_DIV_PHASE_MASK; 3538 3539 val <<= NETC_CLK_DIV_PHASE_OFFS; 3540 val &= NETC_CLK_DIV_PHASE_MASK; 3541 3542 reg |= val; 3543 3544 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3545 } 3546 3547 static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val) 3548 { 3549 u32 reg; 3550 3551 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3552 reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id)); 3553 3554 val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id); 3555 val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id); 3556 3557 reg |= val; 3558 3559 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3560 } 3561 3562 static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id, 3563 u32 val) 3564 { 3565 u32 reg, mask, offset; 3566 3567 if (gop_id == 2) { 3568 mask = NETC_GBE_PORT0_SGMII_MODE_MASK; 3569 offset = NETC_GBE_PORT0_SGMII_MODE_OFFS; 3570 } else { 3571 mask = NETC_GBE_PORT1_SGMII_MODE_MASK; 3572 offset = NETC_GBE_PORT1_SGMII_MODE_OFFS; 3573 } 3574 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3575 reg &= ~mask; 3576 3577 val <<= offset; 3578 val &= mask; 3579 3580 reg |= val; 3581 3582 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3583 } 
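/*
 * All of the gop_netc_*() field helpers follow the same read-modify-write
 * pattern on the RFU1 netcomplex registers: read the register, clear the
 * field with its mask, shift the new value to the field offset, mask it,
 * OR it in, and write it back. Expressed generically (sketch only - this
 * helper does not exist in the driver):
 *
 *	static void gop_rfu1_field_set(struct mvpp2 *priv, u32 reg_off,
 *				       u32 mask, int offs, u32 val)
 *	{
 *		u32 reg = gop_rfu1_read(priv, reg_off);
 *
 *		reg &= ~mask;
 *		reg |= (val << offs) & mask;
 *		gop_rfu1_write(priv, reg_off, reg);
 *	}
 */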
3584 3585 static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val) 3586 { 3587 u32 reg; 3588 3589 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3590 reg &= ~NETC_BUS_WIDTH_SELECT_MASK; 3591 3592 val <<= NETC_BUS_WIDTH_SELECT_OFFS; 3593 val &= NETC_BUS_WIDTH_SELECT_MASK; 3594 3595 reg |= val; 3596 3597 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3598 } 3599 3600 static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val) 3601 { 3602 u32 reg; 3603 3604 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3605 reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; 3606 3607 val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS; 3608 val &= NETC_GIG_RX_DATA_SAMPLE_MASK; 3609 3610 reg |= val; 3611 3612 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3613 } 3614 3615 static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id, 3616 enum mv_netc_phase phase) 3617 { 3618 switch (phase) { 3619 case MV_NETC_FIRST_PHASE: 3620 /* Set Bus Width to HB mode = 1 */ 3621 gop_netc_bus_width_select(priv, 1); 3622 /* Select RGMII mode */ 3623 gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII); 3624 break; 3625 3626 case MV_NETC_SECOND_PHASE: 3627 /* De-assert the relevant port HB reset */ 3628 gop_netc_port_rf_reset(priv, gop_id, 1); 3629 break; 3630 } 3631 } 3632 3633 static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id, 3634 enum mv_netc_phase phase) 3635 { 3636 switch (phase) { 3637 case MV_NETC_FIRST_PHASE: 3638 /* Set Bus Width to HB mode = 1 */ 3639 gop_netc_bus_width_select(priv, 1); 3640 /* Select SGMII mode */ 3641 if (gop_id >= 1) { 3642 gop_netc_gbe_sgmii_mode_select(priv, gop_id, 3643 MV_NETC_GBE_SGMII); 3644 } 3645 3646 /* Configure the sample stages */ 3647 gop_netc_sample_stages_timing(priv, 0); 3648 /* Configure the ComPhy Selector */ 3649 /* gop_netc_com_phy_selector_config(netComplex); */ 3650 break; 3651 3652 case MV_NETC_SECOND_PHASE: 3653 /* De-assert the relevant port HB reset */ 3654 gop_netc_port_rf_reset(priv, gop_id, 1); 3655 break; 3656 } 3657 } 3658 3659 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) 3660 { 3661 u32 c = priv->netc_config; 3662 3663 if (c & MV_NETC_GE_MAC2_SGMII) 3664 gop_netc_mac_to_sgmii(priv, 2, phase); 3665 else 3666 gop_netc_mac_to_xgmii(priv, 2, phase); 3667 3668 if (c & MV_NETC_GE_MAC3_SGMII) { 3669 gop_netc_mac_to_sgmii(priv, 3, phase); 3670 } else { 3671 gop_netc_mac_to_xgmii(priv, 3, phase); 3672 if (c & MV_NETC_GE_MAC3_RGMII) 3673 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); 3674 else 3675 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); 3676 } 3677 3678 /* Activate gop ports 0, 2, 3 */ 3679 gop_netc_active_port(priv, 0, 1); 3680 gop_netc_active_port(priv, 2, 1); 3681 gop_netc_active_port(priv, 3, 1); 3682 3683 if (phase == MV_NETC_SECOND_PHASE) { 3684 /* Enable the GOP internal clock logic */ 3685 gop_netc_gop_clock_logic_set(priv, 1); 3686 /* De-assert GOP unit reset */ 3687 gop_netc_gop_reset(priv, 1); 3688 } 3689 3690 return 0; 3691 } 3692 3693 /* Set defaults to the MVPP2 port */ 3694 static void mvpp2_defaults_set(struct mvpp2_port *port) 3695 { 3696 int tx_port_num, val, queue, ptxq, lrxq; 3697 3698 if (port->priv->hw_version == MVPP21) { 3699 /* Configure port to loopback if needed */ 3700 if (port->flags & MVPP2_F_LOOPBACK) 3701 mvpp2_port_loopback_set(port); 3702 3703 /* Update TX FIFO MIN Threshold */ 3704 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3705 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3706 /* Min. 
TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
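/* The egress scheduler's queue command register takes a bitmap of physical
 * TX queues: writing the bitmap directly enables those queues, while
 * writing the same bitmap shifted left by MVPP2_TXP_SCHED_DISQ_OFFSET
 * requests that they be stopped (see mvpp2_egress_disable() below).
 */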
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}
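/* Usage example: after handling one received frame, mvpp2_recv() calls
 * mvpp2_rxq_status_update(port, rxq->id, 1, 1) to tell the hardware that
 * one descriptor was processed and one buffer is available again;
 * mvpp2_rxq_drop_pkts() does the same for all outstanding descriptors at
 * once.
 */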
/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr 
TXQ no reset WA */ 4065 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 4066 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 4067 4068 /* Set Tx descriptors queue starting address indirect 4069 * access 4070 */ 4071 if (priv->hw_version == MVPP21) 4072 txq_dma = aggr_txq->descs_dma; 4073 else 4074 txq_dma = aggr_txq->descs_dma >> 4075 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 4076 4077 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 4078 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 4079 4080 return 0; 4081 } 4082 4083 /* Create a specified Rx queue */ 4084 static int mvpp2_rxq_init(struct mvpp2_port *port, 4085 struct mvpp2_rx_queue *rxq) 4086 4087 { 4088 u32 rxq_dma; 4089 4090 rxq->size = port->rx_ring_size; 4091 4092 /* Allocate memory for RX descriptors */ 4093 rxq->descs = buffer_loc.rx_descs; 4094 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; 4095 if (!rxq->descs) 4096 return -ENOMEM; 4097 4098 BUG_ON(rxq->descs != 4099 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4100 4101 rxq->last_desc = rxq->size - 1; 4102 4103 /* Zero occupied and non-occupied counters - direct access */ 4104 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4105 4106 /* Set Rx descriptors queue starting address - indirect access */ 4107 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4108 if (port->priv->hw_version == MVPP21) 4109 rxq_dma = rxq->descs_dma; 4110 else 4111 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 4112 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4113 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4114 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 4115 4116 /* Set Offset */ 4117 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4118 4119 /* Add number of descriptors ready for receiving packets */ 4120 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 4121 4122 return 0; 4123 } 4124 4125 /* Push packets received by the RXQ to BM pool */ 4126 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 4127 struct mvpp2_rx_queue *rxq) 4128 { 4129 int rx_received, i; 4130 4131 rx_received = mvpp2_rxq_received(port, rxq->id); 4132 if (!rx_received) 4133 return; 4134 4135 for (i = 0; i < rx_received; i++) { 4136 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4137 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4138 4139 mvpp2_pool_refill(port, bm, 4140 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4141 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4142 } 4143 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4144 } 4145 4146 /* Cleanup Rx queue */ 4147 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4148 struct mvpp2_rx_queue *rxq) 4149 { 4150 mvpp2_rxq_drop_pkts(port, rxq); 4151 4152 rxq->descs = NULL; 4153 rxq->last_desc = 0; 4154 rxq->next_desc_to_proc = 0; 4155 rxq->descs_dma = 0; 4156 4157 /* Clear Rx descriptors queue starting address and size; 4158 * free descriptor number 4159 */ 4160 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4161 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4162 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 4163 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 4164 } 4165 4166 /* Create and initialize a Tx queue */ 4167 static int mvpp2_txq_init(struct mvpp2_port *port, 4168 struct mvpp2_tx_queue *txq) 4169 { 4170 u32 val; 4171 int cpu, desc, desc_per_txq, tx_port_num; 4172 struct mvpp2_txq_pcpu *txq_pcpu; 4173 4174 txq->size = port->tx_ring_size; 4175 4176 /* Allocate memory for Tx descriptors */ 4177 txq->descs = buffer_loc.tx_descs; 4178 
txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup one Tx queue */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
4267 */ 4268 delay = 0; 4269 do { 4270 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4271 netdev_warn(port->dev, 4272 "port %d: cleaning queue %d timed out\n", 4273 port->id, txq->log_id); 4274 break; 4275 } 4276 mdelay(1); 4277 delay++; 4278 4279 pending = mvpp2_txq_pend_desc_num_get(port, txq); 4280 } while (pending); 4281 4282 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4283 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4284 4285 for_each_present_cpu(cpu) { 4286 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4287 4288 /* Release all packets */ 4289 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 4290 4291 /* Reset queue */ 4292 txq_pcpu->count = 0; 4293 txq_pcpu->txq_put_index = 0; 4294 txq_pcpu->txq_get_index = 0; 4295 } 4296 } 4297 4298 /* Cleanup all Tx queues */ 4299 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 4300 { 4301 struct mvpp2_tx_queue *txq; 4302 int queue; 4303 u32 val; 4304 4305 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 4306 4307 /* Reset Tx ports and delete Tx queues */ 4308 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 4309 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4310 4311 for (queue = 0; queue < txq_number; queue++) { 4312 txq = port->txqs[queue]; 4313 mvpp2_txq_clean(port, txq); 4314 mvpp2_txq_deinit(port, txq); 4315 } 4316 4317 mvpp2_txq_sent_counter_clear(port); 4318 4319 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 4320 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4321 } 4322 4323 /* Cleanup all Rx queues */ 4324 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 4325 { 4326 int queue; 4327 4328 for (queue = 0; queue < rxq_number; queue++) 4329 mvpp2_rxq_deinit(port, port->rxqs[queue]); 4330 } 4331 4332 /* Init all Rx queues for port */ 4333 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 4334 { 4335 int queue, err; 4336 4337 for (queue = 0; queue < rxq_number; queue++) { 4338 err = mvpp2_rxq_init(port, port->rxqs[queue]); 4339 if (err) 4340 goto err_cleanup; 4341 } 4342 return 0; 4343 4344 err_cleanup: 4345 mvpp2_cleanup_rxqs(port); 4346 return err; 4347 } 4348 4349 /* Init all tx queues for port */ 4350 static int mvpp2_setup_txqs(struct mvpp2_port *port) 4351 { 4352 struct mvpp2_tx_queue *txq; 4353 int queue, err; 4354 4355 for (queue = 0; queue < txq_number; queue++) { 4356 txq = port->txqs[queue]; 4357 err = mvpp2_txq_init(port, txq); 4358 if (err) 4359 goto err_cleanup; 4360 } 4361 4362 mvpp2_txq_sent_counter_clear(port); 4363 return 0; 4364 4365 err_cleanup: 4366 mvpp2_cleanup_txqs(port); 4367 return err; 4368 } 4369 4370 /* Adjust link */ 4371 static void mvpp2_link_event(struct mvpp2_port *port) 4372 { 4373 struct phy_device *phydev = port->phy_dev; 4374 int status_change = 0; 4375 u32 val; 4376 4377 if (phydev->link) { 4378 if ((port->speed != phydev->speed) || 4379 (port->duplex != phydev->duplex)) { 4380 u32 val; 4381 4382 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4383 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 4384 MVPP2_GMAC_CONFIG_GMII_SPEED | 4385 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 4386 MVPP2_GMAC_AN_SPEED_EN | 4387 MVPP2_GMAC_AN_DUPLEX_EN); 4388 4389 if (phydev->duplex) 4390 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 4391 4392 if (phydev->speed == SPEED_1000) 4393 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 4394 else if (phydev->speed == SPEED_100) 4395 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 4396 4397 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4398 4399 port->duplex = phydev->duplex; 4400 port->speed = phydev->speed; 4401 } 4402 } 4403 4404 if (phydev->link != port->link) { 4405 if 
(!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
		/* fall through */
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}

static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = phy_connect(port->bus, port->phyaddr, dev,
				      port->phy_interface);
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return -ENODEV;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link = 0;
		port->duplex = 0;
		port->speed = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link) {
			printf("%s: No link\n", phy_dev->dev->name);
			return -1;
		}

		port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return 0;
}
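/* Bring-up sequence for a port: program the parser to accept the broadcast
 * and interface MAC addresses, install the default flow, allocate the
 * Rx/Tx queues, and finally connect and start the PHY when one is
 * described in the device tree.
 */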
static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phy_node) {
		err = mvpp2_phy_connect(dev, port);
		if (err < 0)
			return err;

		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}

static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;

	/* Default mdio_base from the same eth base */
	if (port->priv->hw_version == MVPP21)
		port->mdio_base = port->priv->lms_base + MVPP21_SMI;
	else
		port->mdio_base = port->priv->iface_base + MVPP22_SMI;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		ofnode phy_ofnode;
		fdt_addr_t phy_base;

		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", -1);
		if (phyaddr < 0) {
			dev_err(&pdev->dev, "could not find phy address\n");
			return -1;
		}

		phy_ofnode = ofnode_get_parent(offset_to_ofnode(phy_node));
		phy_base = ofnode_get_addr(phy_ofnode);
		if (phy_base == FDT_ADDR_T_NONE) {
			dev_err(&pdev->dev, "could not find mdio base address\n");
			return -1;
		}
		port->mdio_base = (void *)phy_base;
	} else {
		phy_node = 0;
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(&pdev->dev, "missing 
port-id value\n"); 4753 return -EINVAL; 4754 } 4755 4756 #ifdef CONFIG_DM_GPIO 4757 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4758 &port->phy_reset_gpio, GPIOD_IS_OUT); 4759 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0, 4760 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4761 #endif 4762 4763 /* 4764 * ToDo: 4765 * Not sure if this DT property "phy-speed" will get accepted, so 4766 * this might change later 4767 */ 4768 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4769 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4770 "phy-speed", 1000); 4771 4772 port->id = id; 4773 if (port->priv->hw_version == MVPP21) 4774 port->first_rxq = port->id * rxq_number; 4775 else 4776 port->first_rxq = port->id * port->priv->max_port_rxqs; 4777 port->phy_node = phy_node; 4778 port->phy_interface = phy_mode; 4779 port->phyaddr = phyaddr; 4780 4781 return 0; 4782 } 4783 4784 #ifdef CONFIG_DM_GPIO 4785 /* Port GPIO initialization */ 4786 static void mvpp2_gpio_init(struct mvpp2_port *port) 4787 { 4788 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4789 dm_gpio_set_value(&port->phy_reset_gpio, 1); 4790 mdelay(10); 4791 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4792 } 4793 4794 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4795 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4796 } 4797 #endif 4798 4799 /* Ports initialization */ 4800 static int mvpp2_port_probe(struct udevice *dev, 4801 struct mvpp2_port *port, 4802 int port_node, 4803 struct mvpp2 *priv) 4804 { 4805 int err; 4806 4807 port->tx_ring_size = MVPP2_MAX_TXD; 4808 port->rx_ring_size = MVPP2_MAX_RXD; 4809 4810 err = mvpp2_port_init(dev, port); 4811 if (err < 0) { 4812 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4813 return err; 4814 } 4815 mvpp2_port_power_up(port); 4816 4817 #ifdef CONFIG_DM_GPIO 4818 mvpp2_gpio_init(port); 4819 #endif 4820 4821 priv->port_list[port->id] = port; 4822 priv->num_ports++; 4823 return 0; 4824 } 4825 4826 /* Initialize decoding windows */ 4827 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4828 struct mvpp2 *priv) 4829 { 4830 u32 win_enable; 4831 int i; 4832 4833 for (i = 0; i < 6; i++) { 4834 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4835 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4836 4837 if (i < 4) 4838 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4839 } 4840 4841 win_enable = 0; 4842 4843 for (i = 0; i < dram->num_cs; i++) { 4844 const struct mbus_dram_window *cs = dram->cs + i; 4845 4846 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4847 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4848 dram->mbus_dram_target_id); 4849 4850 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4851 (cs->size - 1) & 0xffff0000); 4852 4853 win_enable |= (1 << i); 4854 } 4855 4856 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 4857 } 4858 4859 /* Initialize Rx FIFO's */ 4860 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 4861 { 4862 int port; 4863 4864 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4865 if (priv->hw_version == MVPP22) { 4866 if (port == 0) { 4867 mvpp2_write(priv, 4868 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4869 MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); 4870 mvpp2_write(priv, 4871 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4872 MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); 4873 } else if (port == 1) { 4874 mvpp2_write(priv, 4875 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4876 MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); 4877 mvpp2_write(priv, 4878 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4879 MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); 4880 } else { 4881 mvpp2_write(priv, 4882 
MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4883 MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); 4884 mvpp2_write(priv, 4885 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4886 MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); 4887 } 4888 } else { 4889 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4890 MVPP21_RX_FIFO_PORT_DATA_SIZE); 4891 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4892 MVPP21_RX_FIFO_PORT_ATTR_SIZE); 4893 } 4894 } 4895 4896 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 4897 MVPP2_RX_FIFO_PORT_MIN_PKT); 4898 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 4899 } 4900 4901 /* Initialize Tx FIFO's */ 4902 static void mvpp2_tx_fifo_init(struct mvpp2 *priv) 4903 { 4904 int port, val; 4905 4906 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4907 /* Port 0 supports 10KB TX FIFO */ 4908 if (port == 0) { 4909 val = MVPP2_TX_FIFO_DATA_SIZE_10KB & 4910 MVPP22_TX_FIFO_SIZE_MASK; 4911 } else { 4912 val = MVPP2_TX_FIFO_DATA_SIZE_3KB & 4913 MVPP22_TX_FIFO_SIZE_MASK; 4914 } 4915 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); 4916 } 4917 } 4918 4919 static void mvpp2_axi_init(struct mvpp2 *priv) 4920 { 4921 u32 val, rdval, wrval; 4922 4923 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 4924 4925 /* AXI Bridge Configuration */ 4926 4927 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 4928 << MVPP22_AXI_ATTR_CACHE_OFFS; 4929 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4930 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4931 4932 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 4933 << MVPP22_AXI_ATTR_CACHE_OFFS; 4934 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4935 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4936 4937 /* BM */ 4938 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 4939 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 4940 4941 /* Descriptors */ 4942 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 4943 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 4944 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 4945 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 4946 4947 /* Buffer Data */ 4948 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 4949 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 4950 4951 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 4952 << MVPP22_AXI_CODE_CACHE_OFFS; 4953 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 4954 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4955 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 4956 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 4957 4958 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 4959 << MVPP22_AXI_CODE_CACHE_OFFS; 4960 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4961 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4962 4963 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 4964 4965 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 4966 << MVPP22_AXI_CODE_CACHE_OFFS; 4967 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4968 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4969 4970 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 4971 } 4972 4973 /* Initialize network controller common part HW */ 4974 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 4975 { 4976 const struct mbus_dram_target_info *dram_target_info; 4977 int err, i; 4978 u32 val; 4979 4980 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 4981 if ((rxq_number > priv->max_port_rxqs) || 4982 (txq_number > MVPP2_MAX_TXQ)) { 4983 dev_err(&pdev->dev, "invalid queue size parameter\n"); 4984 return -EINVAL; 4985 } 4986 4987 if (priv->hw_version == MVPP22) 4988 mvpp2_axi_init(priv); 4989 else { 4990 /* MBUS windows configuration */ 4991 dram_target_info = mvebu_mbus_dram_info(); 4992 if 
(dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

/* SMI / MDIO functions */

static int smi_wait_ready(struct mvpp2_port *priv)
{
	u32 timeout = MVPP2_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVPP2_SMI_BUSY);

	return 0;
}
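/* The SMI management register encodes a complete MDIO transaction in a
 * single 32-bit word: data in the low bits, PHY address and register
 * offset in the middle fields, plus opcode (read/write), busy and
 * read-valid flags (see the MVPP2_SMI_* definitions). The two miiphy
 * callbacks below build such a word and poll the flags.
 */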
/*
 * mpp2_mdio_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or -EFAULT on error
 */
static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvpp2_port *priv = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS)
		| MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	/* wait till read value is ready */
	timeout = MVPP2_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVPP2_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
		;

	return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK;
}

/*
 * mpp2_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EFAULT on invalid parameters or
 * SMI busy timeout
 */
static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			   u16 value)
{
	struct mvpp2_port *priv = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVPP2_SMI_DATA_OFFS;
	smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	return 0;
}
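/* U-Boot RX model: mvpp2_recv() is polled by the network stack. It only
 * ever looks at the first RX queue and hands back at most one packet per
 * call, refilling the BM pool and advancing the queue status as it goes.
 */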
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get number of received packets and clamp the to-do count */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * contained in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Give packet to stack - skip the first n bytes (Marvell header
	 * plus Rx packet offset)
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
		/* fall through */
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}
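/* On PPv2.2, write this port's PHY address into the per-port SMI register;
 * this address is used by the hardware PHY polling that is enabled in
 * mvpp2_init().
 */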
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;

	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}
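/* Resulting bd_space layout (one contiguous, 1MiB-aligned, uncached area):
 *
 *   aggregated TX descriptors
 *   per-port TX descriptors
 *   RX descriptors
 *   MVPP2_BM_POOLS_NUM buffer-pointer arrays for the BM pools
 *   MVPP2_BM_LONG_BUF_NUM receive buffers of RX_BUFFER_SIZE each
 */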
static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	struct mii_dev *bus;
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = dev_get_priv(dev->parent);

	/* Create and register the MDIO bus driver */
	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mpp2_mdio_read;
	bus->write = mpp2_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)port;
	port->bus = bus;

	err = mdio_register(bus);
	if (err)
		return err;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phy_node)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
};

static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
5544 */ 5545 static int mvpp2_base_bind(struct udevice *parent) 5546 { 5547 const void *blob = gd->fdt_blob; 5548 int node = dev_of_offset(parent); 5549 struct uclass_driver *drv; 5550 struct udevice *dev; 5551 struct eth_pdata *plat; 5552 char *name; 5553 int subnode; 5554 u32 id; 5555 int base_id_add; 5556 5557 /* Lookup eth driver */ 5558 drv = lists_uclass_lookup(UCLASS_ETH); 5559 if (!drv) { 5560 puts("Cannot find eth driver\n"); 5561 return -ENOENT; 5562 } 5563 5564 base_id_add = base_id; 5565 5566 fdt_for_each_subnode(subnode, blob, node) { 5567 /* Increment base_id for all subnodes, also the disabled ones */ 5568 base_id++; 5569 5570 /* Skip disabled ports */ 5571 if (!fdtdec_get_is_enabled(blob, subnode)) 5572 continue; 5573 5574 plat = calloc(1, sizeof(*plat)); 5575 if (!plat) 5576 return -ENOMEM; 5577 5578 id = fdtdec_get_int(blob, subnode, "port-id", -1); 5579 id += base_id_add; 5580 5581 name = calloc(1, 16); 5582 if (!name) { 5583 free(plat); 5584 return -ENOMEM; 5585 } 5586 sprintf(name, "mvpp2-%d", id); 5587 5588 /* Create child device UCLASS_ETH and bind it */ 5589 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 5590 dev_set_of_offset(dev, subnode); 5591 } 5592 5593 return 0; 5594 } 5595 5596 static const struct udevice_id mvpp2_ids[] = { 5597 { 5598 .compatible = "marvell,armada-375-pp2", 5599 .data = MVPP21, 5600 }, 5601 { 5602 .compatible = "marvell,armada-7k-pp22", 5603 .data = MVPP22, 5604 }, 5605 { } 5606 }; 5607 5608 U_BOOT_DRIVER(mvpp2_base) = { 5609 .name = "mvpp2_base", 5610 .id = UCLASS_MISC, 5611 .of_match = mvpp2_ids, 5612 .bind = mvpp2_base_bind, 5613 .priv_auto_alloc_size = sizeof(struct mvpp2), 5614 }; 5615