// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "debug.h"

/* DMA-map one ring element, track it in the buffer IDR and post its
 * address to the refill SRNG. Caller must hold srng->lock.
 */
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}

/* Allocate and post as many elements as the refill ring currently has
 * free entries. Returns the number of entries that could not be filled.
 */
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = sizeof(*buff) + ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(size, GFP_ATOMIC);
		if (!buff)
			break;

		ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

/* Describe the direct buffer ring to firmware (base/head/tail addresses,
 * element count, buffer size and event moderation) via WMI.
 */
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	param.module_id = id;
	param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems = ring->bufs_max;
	param.buf_size = ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms = ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

/* Derive buffer size/alignment from the reported capability and fill the
 * refill ring with freshly allocated, DMA-mapped elements.
 */
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring);

	return ret;
}

int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

/* Look up the direct buffer capability advertised by firmware for the
 * given pdev and module id.
 */
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

/* Handle a WMI buffer-release event: for each released entry, look up the
 * buffer by its cookie, unmap it, pass the data to the registered module
 * handler (e.g. spectral) and replenish the ring with the same element.
 */
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		memset(buff, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

/* Unmap and free every outstanding element and destroy the buffer IDR. */
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}