// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

/* Install a unicast DMAC filter entry in CGX/RPM for @mac.
 * On success the hardware-assigned filter index is written to
 * *@dmac_index for later update/remove.
 *
 * Returns 0 on success or a negative errno (-ENOMEM if the mbox
 * message cannot be allocated, otherwise the mbox/AF error).
 */
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
			       u32 *dmac_index)
{
	struct cgx_mac_addr_add_req *req;
	struct cgx_mac_addr_add_rsp *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	rsp = (struct cgx_mac_addr_add_rsp *)
	       otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto out;
	}

	/* Remember the index CGX/RPM picked for this filter */
	*dmac_index = rsp->index;
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

/* Program the PF's own netdev MAC address into the CGX/RPM DMAC
 * filter slot *@dmac_index via the mac_addr_set mbox call and
 * update *@dmac_index with the index returned by the response.
 *
 * Returns 0 on success or a negative errno.  Previously a valid
 * error pointer in the response was collapsed to -EINVAL; the real
 * error code is now propagated when available.
 */
static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
	struct cgx_mac_addr_set_or_get *req;
	struct cgx_mac_addr_set_or_get *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	req->index = *dmac_index;

	ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	rsp = (struct cgx_mac_addr_set_or_get *)
	       otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR_OR_NULL(rsp)) {
		/* Propagate the encoded error; fall back to -EINVAL
		 * only when no response was found at all.
		 */
		err = rsp ? PTR_ERR(rsp) : -EINVAL;
		goto out;
	}

	*dmac_index = rsp->index;
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

/* Add a DMAC filter for @mac, tracking the hardware index in the
 * bitmap-position slot @bit_pos.  The PF's own MAC goes through the
 * mac_addr_set path; every other address through mac_addr_add.
 */
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
	/* Slot where the CGX/RPM-assigned index is stored so that a
	 * later update/remove can reference the same filter.
	 */
	u32 *index_slot = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_add_pfmac(pf, index_slot);

	return otx2_dmacflt_do_add(pf, mac, index_slot);
}

/* Delete the CGX/RPM DMAC filter at hardware index @dmac_index.
 * @mac is unused here; it is part of the shared remove interface.
 *
 * Returns 0 on success or a negative errno.
 */
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
				  u32 dmac_index)
{
	struct cgx_mac_addr_del_req *req;
	int err = -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
	if (req) {
		req->index = dmac_index;
		err = otx2_sync_mbox_msg(&pfvf->mbox);
	}
	mutex_unlock(&pfvf->mbox.lock);

	return err;
}

/* Reset the DMAC filter slot @dmac_index that holds the PF's own
 * MAC address, via the mac_addr_reset mbox call.
 *
 * Returns 0 on success or a negative errno.
 */
static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
	struct cgx_mac_addr_reset_req *req;
	int err = -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
	if (req) {
		req->index = dmac_index;
		err = otx2_sync_mbox_msg(&pf->mbox);
	}
	mutex_unlock(&pf->mbox.lock);

	return err;
}

/* Remove the DMAC filter tracked at bitmap position @bit_pos.
 * The PF's own MAC is reset via mac_addr_reset; any other address
 * is deleted via mac_addr_del.
 */
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
			u32 bit_pos)
{
	/* Hardware index recorded when the filter was installed */
	u32 hw_index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_remove_pfmac(pf, hw_index);

	return otx2_dmacflt_do_remove(pf, mac, hw_index);
}

/* CGX/RPM blocks support a maximum of 32 unicast entries.
 * In a typical configuration the MAC block is associated
 * with 4 LMACs, so each LMAC gets 8 DMAC entries.
 */
otx2_dmacflt_get_max_cnt(struct otx2_nic * pf)149 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
150 {
151 struct cgx_max_dmac_entries_get_rsp *rsp;
152 struct msg_req *msg;
153 int err;
154
155 mutex_lock(&pf->mbox.lock);
156 msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
157
158 if (!msg) {
159 mutex_unlock(&pf->mbox.lock);
160 return -ENOMEM;
161 }
162
163 err = otx2_sync_mbox_msg(&pf->mbox);
164 if (err)
165 goto out;
166
167 rsp = (struct cgx_max_dmac_entries_get_rsp *)
168 otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
169
170 if (IS_ERR_OR_NULL(rsp)) {
171 err = -EINVAL;
172 goto out;
173 }
174
175 pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
176
177 out:
178 mutex_unlock(&pf->mbox.lock);
179 return err;
180 }
181
/* Rewrite the MAC address of the filter tracked at bitmap position
 * @bit_pos.  CGX/RPM may relocate the entry, so the index from the
 * response is stored back into bmap_to_dmacindex.
 *
 * Returns 0 on success or a negative errno.
 */
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
	struct cgx_mac_addr_update_req *req;
	struct cgx_mac_addr_update_rsp *rsp;
	int rc = -ENOMEM;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
	if (!req)
		goto unlock;

	ether_addr_copy(req->mac_addr, mac);
	req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	rc = otx2_sync_mbox_msg(&pf->mbox);
	if (rc)
		goto unlock;

	rsp = (struct cgx_mac_addr_update_rsp *)
	       otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto unlock;
	}

	/* Track the (possibly changed) hardware index */
	pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;

unlock:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
