// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "peer.h"
#include "debug.h"

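/* Look up a peer entry matching both vdev id and MAC address.
 * Caller must hold ab->base_lock.
 */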
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
				     const u8 *addr)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}

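/* Look up a peer entry by MAC address alone, across all vdevs.
 * Caller must hold ab->base_lock.
 */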
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
					     const u8 *addr)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}

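/* Look up a peer entry by firmware-assigned peer id.
 * Caller must hold ab->base_lock.
 */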
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
					   int peer_id)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list)
		if (peer_id == peer->peer_id)
			return peer;

	return NULL;
}

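/* Handle an HTT peer unmap event: remove and free the matching peer entry
 * and wake up anyone waiting on peer_mapping_wq.
 */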
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	list_del(&peer->list);
	kfree(peer);
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&ab->base_lock);
}

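/* Handle an HTT peer map event: add a peer entry for the reported
 * vdev/MAC/peer id if one does not already exist.
 */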
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
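		/* ab->base_lock is held, so the allocation must be atomic */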
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}

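/* Wait for a peer to become mapped or unmapped, as signalled by the HTT
 * peer map/unmap event handlers above.
 */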
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

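	/* Abort early if the device has crashed; otherwise give the firmware
	 * event up to three seconds to arrive.
	 */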
	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath11k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

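/* Remove any peer entries still listed for the given vdev.
 * Caller must hold ar->conf_mutex.
 */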
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_peer *peer, *tmp;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
}

static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}

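/* Ask firmware to delete a peer and wait for the corresponding unmap event. */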
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}

static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}

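/* Create a peer in firmware and wait for the HTT peer map event that adds
 * the corresponding local peer entry.
 */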
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
		       struct ieee80211_sta *sta, struct peer_create_params *param)
{
	struct ath11k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath11k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    param->vdev_id, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
					   param->peer_addr);
	if (ret)
		return ret;

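	/* The local peer entry is added by ath11k_peer_map_event(); look it up
	 * here and roll back the firmware peer if it is missing.
	 */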
	spin_lock_bh(&ar->ab->base_lock);

	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);
		ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
						param->vdev_id);
		return -ENOENT;
	}

	peer->sta = sta;
	arvif->ast_hash = peer->ast_hash;

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}