1*d8899132SKalle Valo // SPDX-License-Identifier: BSD-3-Clause-Clear
2*d8899132SKalle Valo /*
3*d8899132SKalle Valo * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4*d8899132SKalle Valo * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5*d8899132SKalle Valo */
6*d8899132SKalle Valo
7*d8899132SKalle Valo #include "core.h"
8*d8899132SKalle Valo #include "peer.h"
9*d8899132SKalle Valo #include "debug.h"
10*d8899132SKalle Valo
ath12k_peer_find(struct ath12k_base * ab,int vdev_id,const u8 * addr)11*d8899132SKalle Valo struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
12*d8899132SKalle Valo const u8 *addr)
13*d8899132SKalle Valo {
14*d8899132SKalle Valo struct ath12k_peer *peer;
15*d8899132SKalle Valo
16*d8899132SKalle Valo lockdep_assert_held(&ab->base_lock);
17*d8899132SKalle Valo
18*d8899132SKalle Valo list_for_each_entry(peer, &ab->peers, list) {
19*d8899132SKalle Valo if (peer->vdev_id != vdev_id)
20*d8899132SKalle Valo continue;
21*d8899132SKalle Valo if (!ether_addr_equal(peer->addr, addr))
22*d8899132SKalle Valo continue;
23*d8899132SKalle Valo
24*d8899132SKalle Valo return peer;
25*d8899132SKalle Valo }
26*d8899132SKalle Valo
27*d8899132SKalle Valo return NULL;
28*d8899132SKalle Valo }
29*d8899132SKalle Valo
/* Look up the peer matching both @pdev_idx and MAC @addr.
 *
 * Caller must hold ab->base_lock. Returns the peer entry, or NULL if no
 * peer on that pdev has the given address.
 */
static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
							u8 pdev_idx, const u8 *addr)
{
	struct ath12k_peer *cur;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(cur, &ab->peers, list) {
		if (cur->pdev_idx == pdev_idx &&
		    ether_addr_equal(cur->addr, addr))
			return cur;
	}

	return NULL;
}
48*d8899132SKalle Valo
ath12k_peer_find_by_addr(struct ath12k_base * ab,const u8 * addr)49*d8899132SKalle Valo struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
50*d8899132SKalle Valo const u8 *addr)
51*d8899132SKalle Valo {
52*d8899132SKalle Valo struct ath12k_peer *peer;
53*d8899132SKalle Valo
54*d8899132SKalle Valo lockdep_assert_held(&ab->base_lock);
55*d8899132SKalle Valo
56*d8899132SKalle Valo list_for_each_entry(peer, &ab->peers, list) {
57*d8899132SKalle Valo if (!ether_addr_equal(peer->addr, addr))
58*d8899132SKalle Valo continue;
59*d8899132SKalle Valo
60*d8899132SKalle Valo return peer;
61*d8899132SKalle Valo }
62*d8899132SKalle Valo
63*d8899132SKalle Valo return NULL;
64*d8899132SKalle Valo }
65*d8899132SKalle Valo
ath12k_peer_find_by_id(struct ath12k_base * ab,int peer_id)66*d8899132SKalle Valo struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
67*d8899132SKalle Valo int peer_id)
68*d8899132SKalle Valo {
69*d8899132SKalle Valo struct ath12k_peer *peer;
70*d8899132SKalle Valo
71*d8899132SKalle Valo lockdep_assert_held(&ab->base_lock);
72*d8899132SKalle Valo
73*d8899132SKalle Valo list_for_each_entry(peer, &ab->peers, list)
74*d8899132SKalle Valo if (peer_id == peer->peer_id)
75*d8899132SKalle Valo return peer;
76*d8899132SKalle Valo
77*d8899132SKalle Valo return NULL;
78*d8899132SKalle Valo }
79*d8899132SKalle Valo
ath12k_peer_exist_by_vdev_id(struct ath12k_base * ab,int vdev_id)80*d8899132SKalle Valo bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
81*d8899132SKalle Valo {
82*d8899132SKalle Valo struct ath12k_peer *peer;
83*d8899132SKalle Valo
84*d8899132SKalle Valo spin_lock_bh(&ab->base_lock);
85*d8899132SKalle Valo
86*d8899132SKalle Valo list_for_each_entry(peer, &ab->peers, list) {
87*d8899132SKalle Valo if (vdev_id == peer->vdev_id) {
88*d8899132SKalle Valo spin_unlock_bh(&ab->base_lock);
89*d8899132SKalle Valo return true;
90*d8899132SKalle Valo }
91*d8899132SKalle Valo }
92*d8899132SKalle Valo spin_unlock_bh(&ab->base_lock);
93*d8899132SKalle Valo return false;
94*d8899132SKalle Valo }
95*d8899132SKalle Valo
ath12k_peer_find_by_ast(struct ath12k_base * ab,int ast_hash)96*d8899132SKalle Valo struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
97*d8899132SKalle Valo int ast_hash)
98*d8899132SKalle Valo {
99*d8899132SKalle Valo struct ath12k_peer *peer;
100*d8899132SKalle Valo
101*d8899132SKalle Valo lockdep_assert_held(&ab->base_lock);
102*d8899132SKalle Valo
103*d8899132SKalle Valo list_for_each_entry(peer, &ab->peers, list)
104*d8899132SKalle Valo if (ast_hash == peer->ast_hash)
105*d8899132SKalle Valo return peer;
106*d8899132SKalle Valo
107*d8899132SKalle Valo return NULL;
108*d8899132SKalle Valo }
109*d8899132SKalle Valo
/* Handle an HTT peer-unmap event: remove and free the peer identified by
 * @peer_id, then wake anyone waiting on peer_mapping_wq (e.g. peer delete
 * waiters). Unknown ids are logged and ignored.
 */
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (peer) {
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
			   peer->vdev_id, peer->addr, peer_id);

		list_del(&peer->list);
		kfree(peer);
		wake_up(&ab->peer_mapping_wq);
	} else {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
	}

	spin_unlock_bh(&ab->base_lock);
}
133*d8899132SKalle Valo
/* Handle an HTT peer-map event: create a peer entry for (vdev_id, mac_addr)
 * if one does not exist yet, record its firmware ids, and wake waiters on
 * peer_mapping_wq. Allocation failure is silently dropped (atomic context).
 */
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, mac_addr);
	if (peer)
		goto out_dbg;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		goto out_unlock;

	peer->vdev_id = vdev_id;
	peer->peer_id = peer_id;
	peer->ast_hash = ast_hash;
	peer->hw_peer_id = hw_peer_id;
	ether_addr_copy(peer->addr, mac_addr);
	list_add(&peer->list, &ab->peers);
	wake_up(&ab->peer_mapping_wq);

out_dbg:
	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

out_unlock:
	spin_unlock_bh(&ab->base_lock);
}
161*d8899132SKalle Valo
/* Wait (up to 3 seconds) for peer (@vdev_id, @addr) to reach the expected
 * mapping state: present in ab->peers when @expect_mapped is true, absent
 * when false.
 *
 * Returns 0 on success, -ETIMEDOUT otherwise.
 *
 * NOTE(review): the wait condition is also satisfied by
 * ATH12K_FLAG_CRASH_FLUSH being set, so a crash flush makes this return 0
 * regardless of the peer's actual state.
 */
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	/* Condition is a GCC statement expression: re-check the peer list
	 * under base_lock each time the wait queue is woken (peer map/unmap
	 * events call wake_up(&ab->peer_mapping_wq)).
	 */
	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath12k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	/* wait_event_timeout() returns 0 on timeout, >0 otherwise */
	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
183*d8899132SKalle Valo
/* Remove and free every peer still attached to @vdev_id, adjusting
 * ar->num_peers. Each removal is logged as a warning since peers should
 * normally have been deleted individually before this runs.
 *
 * Caller must hold ar->conf_mutex; ab->base_lock is taken internally.
 */
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *entry, *next;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ab->base_lock);

	/* _safe variant: entries are unlinked and freed while iterating */
	list_for_each_entry_safe(entry, next, &ab->peers, list) {
		if (entry->vdev_id == vdev_id) {
			ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
				    entry->addr, vdev_id);

			list_del(&entry->list);
			kfree(entry);
			ar->num_peers--;
		}
	}

	spin_unlock_bh(&ab->base_lock);
}
206*d8899132SKalle Valo
/* Wait for peer (@vdev_id, @addr) to disappear from the peer list.
 * Thin wrapper over ath12k_wait_for_peer_common() with expect_mapped=false.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
211*d8899132SKalle Valo
/* Wait for a peer delete to fully complete: first for the peer entry to be
 * removed from the local list (HTT unmap event), then for the firmware's
 * WMI peer delete response (ar->peer_delete_done completion).
 *
 * Caller is expected to have called reinit_completion(&ar->peer_delete_done)
 * before issuing the delete command.
 *
 * Returns 0 on success, -ETIMEDOUT if either stage times out.
 */
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		/* Fix: log message was missing the terminating newline */
		ath12k_warn(ar->ab, "failed wait for peer deleted\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
233*d8899132SKalle Valo
/* Delete the peer (@vdev_id, @addr) in firmware via WMI and wait for the
 * deletion to complete, decrementing ar->num_peers on success.
 *
 * Caller must hold ar->conf_mutex. Returns 0 on success or a negative
 * error from the WMI send / completion wait.
 */
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	/* Arm the completion before firing the command so the response
	 * cannot be missed.
	 */
	reinit_completion(&ar->peer_delete_done);

	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (!ret)
		ar->num_peers--;

	return ret;
}
258*d8899132SKalle Valo
/* Wait for peer (@vdev_id, @addr) to appear in the peer list.
 * Thin wrapper over ath12k_wait_for_peer_common() with expect_mapped=true.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
263*d8899132SKalle Valo
/* Create a peer in firmware and set up the local peer entry.
 *
 * Sequence: check capacity, reject a duplicate address on this pdev, send
 * the WMI peer create command, wait for the HTT map event to populate
 * ab->peers, then finish initializing the entry. If the entry never shows
 * up, the firmware peer is rolled back with a delete command.
 *
 * Caller must hold ar->conf_mutex. Returns 0 on success, -ENOBUFS when at
 * peer capacity, -EINVAL on duplicate, -ENOENT if the created peer never
 * appeared, or a negative error from the WMI/wait helpers.
 */
int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
		       struct ieee80211_sta *sta,
		       struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath12k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	/* Duplicate MAC on the same pdev is not allowed; probe under lock.
	 * NOTE(review): the lock is dropped before the WMI command below,
	 * so this check is only a best-effort guard against races.
	 */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    arg->vdev_id, ret);
		return ret;
	}

	/* The entry is added to ab->peers by the HTT peer map event */
	ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
					   arg->peer_addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->ab->base_lock);

	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    arg->peer_addr, arg->vdev_id);

		/* Roll back: the firmware peer exists but we have no local
		 * entry for it, so delete it again.
		 */
		reinit_completion(&ar->peer_delete_done);

		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
						      arg->vdev_id);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
				    arg->vdev_id, arg->peer_addr);
			return ret;
		}

		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
						       arg->peer_addr);
		if (ret)
			return ret;

		return -ENOENT;
	}

	/* Finish local initialization of the entry created by the map event */
	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	/* New peers start unencrypted until keys are installed */
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}
343