// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include "mcs.h"
#include "mcs_reg.h"

static struct mcs_ops cnf10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
};

struct mcs_ops *cnf10kb_get_mac_ops(void)
{
	return &cnf10kb_mcs_ops;
}

void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 64;	/* TCAM entries */
	hw->secy_entries = 64;	/* SecY entries */
	hw->sc_entries = 64;	/* SC CAM entries */
	hw->sa_entries = 128;	/* SA entries */
	hw->lmac_cnt = 4;	/* LMACs/ports per MCS block */
	hw->mcs_x2p_intf = 1;	/* X2P calibration interfaces */
	hw->mcs_blks = 7;	/* MCS blocks */
}

void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN CTag */
	val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);

	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag */
	val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);

	/* RX */
	reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
	mcs_reg_write(mcs, reg, val);

	/* Enable custom tags 0 and 1 and the sectag */
	val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);

	reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
	mcs_reg_write(mcs, reg, val);
}

void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;

	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		/* TX: the SCI goes into MAP_MEM_0, the secy/SC mapping into MAP_MEM_1 */
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
		mcs_reg_write(mcs, reg, map->sci);
		val |= (map->sc & 0x3F) << 7;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}

void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	/* Pack both SA indices mapped to this SC */
	val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;

	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
	val = mcs_reg_read(mcs, reg);

	if (map->rekey_ena)
		val |= BIT_ULL(map->sc_id);
	else
		val &= ~BIT_ULL(map->sc_id);

	mcs_reg_write(mcs, reg, val);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
}

void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

	val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}

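/* Worked example of the RX SA map addressing above (inferred from this
 * file rather than the hardware spec): each SC owns four consecutive SA
 * map entries, one per association number, so SC 5 / AN 2 resolves to
 * entry (4 * 5) + 2 = 22. Within an entry, bits [6:0] carry the SA index
 * and bit 7 the sa_in_use flag.
 */
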
int mcs_set_force_clk_en(struct mcs *mcs, bool set)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(2000);
	u64 val;

	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);

	if (set) {
		val |= BIT_ULL(4);
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

		/* Poll till MCSX_MIL_IP_GBL_STATUS.mcs_ip_stats_ready is 1 */
		while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
			if (time_after(jiffies, timeout)) {
				dev_err(mcs->dev, "MCS set force clk enable failed\n");
				break;
			}
		}
	} else {
		val &= ~BIT_ULL(4);
		mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
	}

	return 0;
}

/* The TX SA interrupt is raised only if auto rekey is enabled.
 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
 * one of the two SAs mapped to the SC expires: tx_sa_active = 0 means
 * the SA in SA_index1 expired, otherwise the SA in SA_index0 expired.
 */
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	unsigned long rekey_ena;
	u64 val, sa_status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		/* Handle only SCs with auto rekey enabled */
		if (!test_bit(sc, &rekey_ena))
			continue;
		sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
		/* Check if the tx_sa_active status has changed */
		if (sa_status == mcs->tx_sa_active[sc])
			continue;

		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		/* Toggled to 1: SA_index0 expired; toggled to 0: SA_index1 expired */
		if (sa_status)
			event.sa_id = val & 0x7F;
		else
			event.sa_id = (val >> 7) & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was in use and expired */
			event.sa_id = (val >> 7) & 0x7F;
		else
			/* SA_index0 was in use and expired */
			event.sa_id = val & 0x7F;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
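
/* A minimal sketch (illustrative only, not part of the driver) of the
 * expired-SA decode shared by the two handlers above, assuming the
 * SA_MAP_MEM_0X layout used in this file, i.e. (sa_index1 << 7) | sa_index0:
 *
 *	static u8 cnf10kb_expired_sa_id(u64 sa_map, bool active_is_index1)
 *	{
 *		// Hardware switched to SA_index1, so SA_index0 (bits [6:0])
 *		// is the SA that expired; otherwise the expired SA is
 *		// SA_index1 in bits [13:7].
 *		return active_is_index1 ? (sa_map & 0x7F) :
 *					  ((sa_map >> 7) & 0x7F);
 *	}
 */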