// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"

static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
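
/* A note on units in the table above, with a minimal sketch of the helpers
 * that consume it: entry_size is expressed in 32-bit words (hence the ">> 2"
 * applied to each descriptor size) and max_size uses the same unit, so
 * ath11k_hal_srng_get_entrysize() converts back to bytes with "<< 2" and
 * ath11k_hal_srng_get_max_entries() divides max_size by entry_size.
 * Illustrative only; the variable names below are placeholders:
 *
 *	entry_bytes = ath11k_hal_srng_get_entrysize(ab, HAL_REO_DST);
 *	max_entries = ath11k_hal_srng_get_max_entries(ab, HAL_REO_DST);
 *	max_ring_bytes = entry_bytes * max_entries;
 */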

static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
					    GFP_KERNEL);
	if (!hal->rdp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->rdp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	dma_free_coherent(ab->dev, size,
			  hal->rdp.vaddr, hal->rdp.paddr);
	hal->rdp.vaddr = NULL;
}

static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
					    GFP_KERNEL);
	if (!hal->wrp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->wrp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	dma_free_coherent(ab->dev, size,
			  hal->wrp.vaddr, hal->wrp.paddr);
	hal->wrp.vaddr = NULL;
}
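
/* The "rdp" block above provides one DMA-coherent u32 slot per ring ID for
 * the pointer that the hardware/firmware updates; the "wrp" block provides
 * one slot per LMAC ring for the pointer the host updates, so that firmware
 * can read it from shared memory instead of a register (see
 * ath11k_hal_srng_setup() and ath11k_hal_srng_access_end() below).
 */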

static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
				    struct hal_srng *srng, int ring_num)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
	u32 addr;
	u32 val;

	addr = HAL_CE_DST_RING_CTRL +
	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];

	val = ath11k_hif_read32(ab, addr);
	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
			  srng->u.dst_ring.max_buffer_length);
	ath11k_hif_write32(ab, addr, val);
}

static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 hp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);

		ath11k_hif_write32(ab,
				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);

	/* interrupt setup */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
			 (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
			   val);

	hp_addr = hal->rdp.paddr +
		  ((unsigned long)srng->u.dst_ring.hp_addr -
		   (unsigned long)hal->rdp.vaddr);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
			   hp_addr & HAL_ADDR_LSB_REG_MASK);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
	*srng->u.dst_ring.hp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}

static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 tp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
				   val);

		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);

	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
				 ((u64)srng->ring_base_paddr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
				 (srng->entry_size * srng->num_entries));
		ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
			 srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
			   val);

	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
				  srng->u.src_ring.low_threshold);
	}
	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
			   val);

	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
			  ((unsigned long)srng->u.src_ring.tp_addr -
			   (unsigned long)hal->rdp.vaddr);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
				   tp_addr & HAL_ADDR_LSB_REG_MASK);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}

static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		ath11k_hal_srng_src_hw_init(ab, srng);
	else
		ath11k_hal_srng_dst_hw_init(ab, srng);
}
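
/* Both hw_init paths above follow the same sequence: program the MSI target
 * (when enabled), the ring base address and size, the interrupt mitigation
 * thresholds, point the HW-updated pointer at its slot in the shared rdp
 * area, zero HP/TP so the ring starts out empty, and finally set the
 * SRNG_ENABLE bit in the MISC register.
 */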

static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
				       enum hal_ring_type type,
				       int ring_num, int mac_id)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	int ring_id;

	if (ring_num >= srng_config->max_rings) {
		ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
		return -EINVAL;
	}

	ring_id = srng_config->start_ring_id + ring_num;
	if (srng_config->lmac_ring)
		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;

	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
		return -EINVAL;

	return ring_id;
}

int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->entry_size << 2);
}

int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->max_size / srng_config->entry_size);
}

void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
				struct hal_srng_params *params)
{
	params->ring_base_paddr = srng->ring_base_paddr;
	params->ring_base_vaddr = srng->ring_base_vaddr;
	params->num_entries = srng->num_entries;
	params->intr_timer_thres_us = srng->intr_timer_thres_us;
	params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	params->low_threshold = srng->u.src_ring.low_threshold;
	params->msi_addr = srng->msi_addr;
	params->msi_data = srng->msi_data;
	params->flags = srng->flags;
}

dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.src_ring.hp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
	else
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.dst_ring.hp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
}

dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.src_ring.tp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
	else
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.dst_ring.tp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
}
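
/* For LMAC rings the two helpers above return the DMA address of the
 * relevant pointer slot: the host-updated pointer (source HP, destination
 * TP) lives in the wrp block, while the HW/FW-updated pointer (source TP,
 * destination HP) lives in the rdp block. Non-LMAC rings use MMIO
 * registers instead, so 0 is returned for them.
 */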

u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
	switch (type) {
	case HAL_CE_DESC_SRC:
		return sizeof(struct hal_ce_srng_src_desc);
	case HAL_CE_DESC_DST:
		return sizeof(struct hal_ce_srng_dest_desc);
	case HAL_CE_DESC_DST_STATUS:
		return sizeof(struct hal_ce_srng_dst_status_desc);
	}

	return 0;
}

void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
				u8 byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
			   byte_swap_data) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}

void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
{
	struct hal_ce_srng_dest_desc *desc =
		(struct hal_ce_srng_dest_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
}

u32 ath11k_hal_ce_dst_status_get_length(void *buf)
{
	struct hal_ce_srng_dst_status_desc *desc =
		(struct hal_ce_srng_dst_status_desc *)buf;
	u32 len;

	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;

	return len;
}

void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
				   dma_addr_t paddr)
{
	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
					       (paddr & HAL_ADDR_LSB_REG_MASK));
	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
}

u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);

	return NULL;
}

u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *srng->u.dst_ring.hp_addr;
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

/* Returns number of available entries in src ring */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *srng->u.src_ring.tp_addr;
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
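
/* HP/TP are offsets in 32-bit words, so the arithmetic above divides by
 * entry_size to convert to entries. A minimal worked example, assuming a
 * ring with entry_size = 2 words and num_entries = 4 (ring_size = 8):
 * with tp = 2 and hp = 6 a destination ring has (6 - 2) / 2 = 2 entries
 * pending, while a source ring with hp = 6 and tp = 2 has
 * ((8 - 6 + 2) / 2) - 1 = 1 free slot; the "- 1" keeps one entry unused so
 * that hp == tp can only mean "empty", never "full".
 */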

u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;
	u32 next_hp;

	lockdep_assert_held(&srng->lock);

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
				   struct hal_srng *srng)
{
	u32 *desc;
	u32 next_reap_hp;

	lockdep_assert_held(&srng->lock);

	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		       srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + next_reap_hp;
	srng->u.src_ring.reap_hp = next_reap_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}
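
/* The source-ring accessors above differ only in which pointer they move:
 * ath11k_hal_srng_src_get_next_entry() claims an entry and advances both hp
 * and reap_hp; ath11k_hal_srng_src_reap_next() advances reap_hp only, so a
 * caller can fill descriptors first and publish them later with
 * ath11k_hal_srng_src_get_next_reaped(), which advances hp up to reap_hp;
 * ath11k_hal_srng_src_peek() looks at the next entry without moving
 * anything.
 */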

void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	else
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
}

/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
 * should have been called before this.
 */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	srng->timestamp = jiffies;
}
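
/* Typical access pattern for a destination ring, shown as an illustrative
 * sketch only (the caller owns srng->lock and supplies its own descriptor
 * handling):
 *
 *	spin_lock_bh(&srng->lock);
 *	ath11k_hal_srng_access_begin(ab, srng);
 *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
 *		;	// process *desc here
 *	ath11k_hal_srng_access_end(ab, srng);
 *	spin_unlock_bh(&srng->lock);
 *
 * access_begin() snapshots the HW-updated pointer and access_end()
 * publishes the SW-updated pointer back, either to shared memory (LMAC
 * rings) or via an MMIO write.
 */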

void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
			    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
				      reg_scatter_buf_sz * nsbufs));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_LSB,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_MSB,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[nsbufs - 1].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[nsbufs - 1].paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
				      (end_offset >> 2)));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
				      0));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
			   2 * tot_link_desc);

	/* Enable the SRNG */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
}
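
/* In the loop above, the ath11k_buffer_addr written at offset
 * HAL_WBM_IDLE_SCATTER_BUF_SIZE of each scatter buffer records the base
 * address and match tag of the next buffer in the list; the register writes
 * that follow then describe the list to WBM (scatter buffer size, total
 * list size, base address and the initial head/tail pointers).
 */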

int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 lmac_idx;
	int i;
	u32 reg_base;

	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
		params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);

	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->lmac_ring) {
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
					(u32 *)((unsigned long)ab->mem + reg_base);
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
					(u32 *)((unsigned long)ab->mem + reg_base +
						(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base + (HAL_REO1_RING_TP(ab) -
						       HAL_REO1_RING_HP(ab)),
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem);
		}
	}

	if (srng_config->lmac_ring)
		return ring_id;

	ath11k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}
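
/* Illustrative sketch of how a caller might set up a ring; the buffer
 * variables and threshold values below are placeholders, not values
 * mandated by this file:
 *
 *	struct hal_srng_params params = { 0 };
 *
 *	params.ring_base_vaddr = ring_vaddr;	// DMA-coherent ring buffer
 *	params.ring_base_paddr = ring_paddr;
 *	params.num_entries = num_entries;
 *	params.intr_batch_cntr_thres_entries = 1;
 *	params.intr_timer_thres_us = 0;
 *	ring_id = ath11k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
 *	if (ring_id < 0)
 *		return ring_id;
 *
 * For non-LMAC rings the HW registers are programmed immediately; for LMAC
 * rings only the SW state is initialized and the ring ID is returned
 * without touching the hardware registers.
 */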

static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
					      int shadow_cfg_idx,
					      enum hal_ring_type ring_type,
					      int ring_num)
{
	struct hal_srng *srng;
	struct ath11k_hal *hal = &ab->hal;
	int ring_id;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = &hal->srng_list[ring_id];

	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
						   (unsigned long)ab->mem);
	else
		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
						   (unsigned long)ab->mem);
}

int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
					 enum hal_ring_type ring_type,
					 int ring_num)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
	int shadow_cfg_idx = hal->num_shadow_reg_configured;
	u32 target_reg;

	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
		return -EINVAL;

	hal->num_shadow_reg_configured++;

	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
		      ring_num;

	/* For destination ring, shadow the TP */
	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		target_reg += HAL_OFFSET_FROM_HP_TO_TP;

	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;

	/* update hp/tp addr in the hal structure */
	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
					  ring_num);

	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
		   target_reg,
		   HAL_SHADOW_REG(shadow_cfg_idx),
		   shadow_cfg_idx,
		   ring_type, ring_num);

	return 0;
}

void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ring_type, ring_num;

	/* update all the non-CE srngs. */
	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

		if (ring_type == HAL_CE_SRC ||
		    ring_type == HAL_CE_DST ||
		    ring_type == HAL_CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
	}
}

void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
				       u32 **cfg, u32 *len)
{
	struct ath11k_hal *hal = &ab->hal;

	*len = hal->num_shadow_reg_configured;
	*cfg = hal->shadow_reg_addr;
}

void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* check whether the ring is empty. Update the shadow
	 * HP only when the ring isn't empty.
	 */
	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
		ath11k_hal_srng_access_end(ab, srng);
}
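
/* With shadow registers, each configured ring's HP/TP target register is
 * recorded in hal->shadow_reg_addr and the SW pointer address is remapped
 * to the corresponding HAL_SHADOW_REG() offset; the list exported by
 * ath11k_hal_srng_get_shadow_config() is presumably handed to firmware so
 * that writes to the shadow addresses are forwarded to the real registers.
 */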

static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *s;

	hal->srng_config = kmemdup(hw_srng_config_template,
				   sizeof(hw_srng_config_template),
				   GFP_KERNEL);
	if (!hal->srng_config)
		return -ENOMEM;

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	s = &hal->srng_config[HAL_CE_SRC];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);

	s = &hal->srng_config[HAL_CE_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);

	s = &hal->srng_config[HAL_CE_DST_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
		HAL_CE_DST_STATUS_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);

	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;

	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;

	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
		HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;

	return 0;
}
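
/* The per-SoC register layout is applied on top of the template above: for
 * each ring type, reg_start[] holds the bases of the ring-configuration and
 * HP/TP register groups of the first ring instance and reg_size[] the
 * stride to the next instance, which is how ath11k_hal_srng_setup() derives
 * hwreg_base[] for a given ring_num.
 */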

int ath11k_hal_srng_init(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ret;

	memset(hal, 0, sizeof(*hal));

	ret = ath11k_hal_srng_create_config(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_rdp(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_wrp(ab);
	if (ret)
		goto err_free_cont_rdp;

	return 0;

err_free_cont_rdp:
	ath11k_hal_free_cont_rdp(ab);

err_hal:
	return ret;
}
EXPORT_SYMBOL(ath11k_hal_srng_init);

void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;

	ath11k_hal_free_cont_rdp(ab);
	ath11k_hal_free_cont_wrp(ab);
	kfree(hal->srng_config);
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);

void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
	struct hal_srng *srng;
	struct ath11k_ext_irq_grp *irq_grp;
	struct ath11k_ce_pipe *ce_pipe;
	int i;

	ath11k_err(ab, "Last interrupt received for each CE:\n");
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
			   i, ce_pipe->pipe_num,
			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
	}

	ath11k_err(ab, "\nLast interrupt received for each group:\n");
	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		ath11k_err(ab, "group_id %d %ums before\n",
			   irq_grp->grp_id,
			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
	}

	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
		srng = &ab->hal.srng_list[i];

		if (!srng->initialized)
			continue;

		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
			ath11k_err(ab,
				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.src_ring.hp,
				   srng->u.src_ring.reap_hp,
				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
				   srng->u.src_ring.last_tp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
			ath11k_err(ab,
				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.dst_ring.tp,
				   *srng->u.dst_ring.hp_addr,
				   srng->u.dst_ring.cached_hp,
				   srng->u.dst_ring.last_hp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
	}
}