// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2021 Marvell.
 */

#include <linux/bitfield.h>
#include "rvu.h"

/* Install/update the DMAC based RX rule for a PF/VF. A chan_mask of 0x0
 * makes the rule ignore the ingress channel, while 0xFFF restores an exact
 * match on the PF/VF's own channel.
 */
static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
				      u16 chan_mask)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* If the pcifunc is not initialized then there is nothing to do.
	 * This same function will be called again via rvu_switch_update_rules
	 * after the pcifunc is initialized.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.features = BIT_ULL(NPC_DMAC);
	req.channel = pfvf->rx_chan_base;
	req.chan_mask = chan_mask;
	req.intf = pfvf->nix_rx_intf;
	req.op = NIX_RX_ACTION_DEFAULT;
	req.default_rule = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

/* Install a TX side rule matching the PF/VF's MAC address as DMAC, with an
 * action that forwards the packet to the LBK channel, using the MCAM entry
 * reserved for this pcifunc.
 */
static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
{
	struct npc_install_flow_req req = { 0 };
	struct npc_install_flow_rsp rsp = { 0 };
	struct rvu_pfvf *pfvf;
	u8 lbkid;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	/* If the pcifunc is not initialized then there is nothing to do.
	 * This same function will be called again via rvu_switch_update_rules
	 * after the pcifunc is initialized.
	 */
	if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
		return 0;

	lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
	ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
	eth_broadcast_addr((u8 *)&req.mask.dmac);
	req.hdr.pcifunc = 0; /* AF is requester */
	req.vf = pcifunc;
	req.entry = entry;
	req.features = BIT_ULL(NPC_DMAC);
	req.intf = pfvf->nix_tx_intf;
	req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
	req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
	req.set_cntr = 1;

	return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}

/* Pre-install RX and TX switching rules for every CGX mapped PF and its VFs,
 * recording which MCAM entry is used for which pcifunc.
 */
static int rvu_switch_install_rules(struct rvu *rvu)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u16 start = rswitch->start_entry;
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	u16 pcifunc, entry = 0;
	int err;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << 10;
		/* rvu_get_nix_blkaddr sets up the corresponding NIX block
		 * address and the NIX RX and TX interfaces for a pcifunc.
		 * It is normally called when a pcifunc attaches its LFs, but
		 * it is called here because rules are pre-installed before
		 * any NIXLFs are attached.
		 */
		rvu_get_nix_blkaddr(rvu, pcifunc);

		/* An MCAM RX rule for a PF/VF already exists as the default
		 * unicast rule installed by AF. Hence change the channel in
		 * that rule to an ignore-channel match, so that packets with
		 * the required DMAC are accepted whether they are received
		 * from LBK (from other PF/VFs in the system) or from the
		 * external world (from the wire).
		 */
		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
		if (err) {
			dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
		if (err) {
			dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
				pf, err);
			return err;
		}

		rswitch->entry2pcifunc[entry++] = pcifunc;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
			rvu_get_nix_blkaddr(rvu, pcifunc);

			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
			if (err) {
				dev_err(rvu->dev,
					"RX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			err = rvu_switch_install_tx_rule(rvu, pcifunc,
							 start + entry);
			if (err) {
				dev_err(rvu->dev,
					"TX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
				return err;
			}

			rswitch->entry2pcifunc[entry++] = pcifunc;
		}
	}

	return 0;
}
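/* Allocate a contiguous block of MCAM entries, one per CGX mapped PF/VF,
 * and pre-install the switching rules for them. On failure, any rules
 * installed so far are deleted and the allocated MCAM entries are freed.
 */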
void rvu_switch_enable(struct rvu *rvu)
{
	struct npc_mcam_alloc_entry_req alloc_req = { 0 };
	struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_mcam_free_entry_req free_req = { 0 };
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct msg_rsp rsp;
	int ret;

	alloc_req.contig = true;
	alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
	ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
						    &alloc_rsp);
	if (ret) {
		dev_err(rvu->dev,
			"Unable to allocate MCAM entries\n");
		goto exit;
	}

	if (alloc_rsp.count != alloc_req.count) {
		dev_err(rvu->dev,
			"Unable to allocate %d MCAM entries, got %d\n",
			alloc_req.count, alloc_rsp.count);
		goto free_entries;
	}

	rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
					 GFP_KERNEL);
	if (!rswitch->entry2pcifunc)
		goto free_entries;

	rswitch->used_entries = alloc_rsp.count;
	rswitch->start_entry = alloc_rsp.entry;

	ret = rvu_switch_install_rules(rvu);
	if (ret)
		goto uninstall_rules;

	return;

uninstall_rules:
	uninstall_req.start = rswitch->start_entry;
	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
	kfree(rswitch->entry2pcifunc);
free_entries:
	free_req.all = 1;
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
exit:
	return;
}

/* Revert the RX rules to match each PF/VF's own channel again, then delete
 * the switching rules and free the MCAM entries allocated by
 * rvu_switch_enable().
 */
void rvu_switch_disable(struct rvu *rvu)
{
	struct npc_delete_flow_req uninstall_req = { 0 };
	struct npc_mcam_free_entry_req free_req = { 0 };
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct msg_rsp rsp;
	u16 pcifunc;
	int err;

	if (!rswitch->used_entries)
		return;

	for (pf = 1; pf < hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pcifunc = pf << 10;
		err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
		if (err)
			dev_err(rvu->dev,
				"Reverting RX rule for PF%d failed(%d)\n",
				pf, err);

		/* Fetch the VF count for this PF before reverting the RX
		 * rules of its VFs.
		 */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
			err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
			if (err)
				dev_err(rvu->dev,
					"Reverting RX rule for PF%dVF%d failed(%d)\n",
					pf, vf, err);
		}
	}

	uninstall_req.start = rswitch->start_entry;
	uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
	free_req.all = 1;
	rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
	rswitch->used_entries = 0;
	kfree(rswitch->entry2pcifunc);
}
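/* Called once a pcifunc's NIXLF has been initialized. The rule installers
 * above skip uninitialized pcifuncs, so install the TX and RX rules for this
 * pcifunc now, reusing the MCAM entry reserved for it.
 */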
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	u32 max = rswitch->used_entries;
	u16 entry;

	if (!rswitch->used_entries)
		return;

	for (entry = 0; entry < max; entry++) {
		if (rswitch->entry2pcifunc[entry] == pcifunc)
			break;
	}

	if (entry >= max)
		return;

	rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
	rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
}