// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"

static const struct hal_srng_config hw_srng_config[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
		},
		.reg_size = {
			HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
			HAL_REO2_RING_HP - HAL_REO1_RING_HP,
		},
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
		},
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
		},
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
		},
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_REO_REG +
				HAL_REO_STATUS_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
		},
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
		},
		.reg_size = {
			HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
			HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
		},
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
		},
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			       sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			HAL_SEQ_WCSS_UMAC_TCL_REG +
				HAL_TCL_STATUS_RING_BASE_LSB,
			HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
		},
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
			 HAL_CE_DST_RING_BASE_LSB),
			HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
		},
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_RING_BASE_LSB),
			HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
		},
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_STATUS_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_STATUS_RING_HP),
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
		},
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM_IDLE_LINK_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
		},
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM_RELEASE_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
		},
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM0_RELEASE_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
		},
		.reg_size = {
			(HAL_WBM1_RELEASE_RING_BASE_LSB -
			 HAL_WBM0_RELEASE_RING_BASE_LSB),
			(HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
		},
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};

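/* Editorial note (derived from how the fields are used below, not from the
 * original sources): the helpers that follow manage two small DMA-coherent
 * arrays backing the "shadow" ring pointers. rdp has one u32 slot per ring
 * ID and holds the pointer the host reads back (TP of SRC rings, HP of DST
 * rings); wrp has one slot per LMAC ring and holds the pointer the host
 * publishes for firmware to read (SRC HP / DST TP).
 */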
static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
					    GFP_KERNEL);
	if (!hal->rdp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->rdp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	dma_free_coherent(ab->dev, size,
			  hal->rdp.vaddr, hal->rdp.paddr);
	hal->rdp.vaddr = NULL;
}

static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
					    GFP_KERNEL);
	if (!hal->wrp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->wrp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	dma_free_coherent(ab->dev, size,
			  hal->wrp.vaddr, hal->wrp.paddr);
	hal->wrp.vaddr = NULL;
}

static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
				    struct hal_srng *srng, int ring_num)
{
	const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
	u32 addr;
	u32 val;

	addr = HAL_CE_DST_RING_CTRL +
	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];

	val = ath11k_hif_read32(ab, addr);
	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
			  srng->u.dst_ring.max_buffer_length);
	ath11k_hif_write32(ab, addr, val);
}

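/* Program a destination (REO-style) SRNG in hardware: optional MSI setup,
 * ring base address and size, producer interrupt thresholds, the shadow HP
 * address inside the rdp area, and finally the MISC register that enables
 * the ring.
 */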
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 hp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
				   (u32)srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);

		ath11k_hif_write32(ab,
				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);

	/* interrupt setup */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
			 (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
			   val);

	hp_addr = hal->rdp.paddr +
		  ((unsigned long)srng->u.dst_ring.hp_addr -
		   (unsigned long)hal->rdp.vaddr);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
			   hp_addr & HAL_ADDR_LSB_REG_MASK);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
	*srng->u.dst_ring.hp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
}

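/* Source-ring counterpart of the above, using the TCL register layout. The
 * WBM idle link ring is the one ring that does not get a shadow TP address,
 * and the MSI loop count is disabled since SRC rings do not use it.
 */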
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 tp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
				   (u32)srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
				   val);

		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_DATA_OFFSET,
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
			 srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
			   val);

	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
				  srng->u.src_ring.low_threshold);
	}
	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
			   val);

	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
			  ((unsigned long)srng->u.src_ring.tp_addr -
			   (unsigned long)hal->rdp.vaddr);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
				   tp_addr & HAL_ADDR_LSB_REG_MASK);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
}

static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		ath11k_hal_srng_src_hw_init(ab, srng);
	else
		ath11k_hal_srng_dst_hw_init(ab, srng);
}

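/* Map (ring type, ring number, mac id) to the global ring ID used to index
 * hal->srng_list. LMAC rings are replicated per MAC, so their ID gets a
 * per-MAC offset.
 */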
static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
				       enum hal_ring_type type,
				       int ring_num, int mac_id)
{
	const struct hal_srng_config *srng_config = &hw_srng_config[type];
	int ring_id;

	if (ring_num >= srng_config->max_rings) {
		ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
		return -EINVAL;
	}

	ring_id = srng_config->start_ring_id + ring_num;
	if (srng_config->lmac_ring)
		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;

	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
		return -EINVAL;

	return ring_id;
}

int ath11k_hal_srng_get_entrysize(u32 ring_type)
{
	const struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &hw_srng_config[ring_type];

	return (srng_config->entry_size << 2);
}

int ath11k_hal_srng_get_max_entries(u32 ring_type)
{
	const struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &hw_srng_config[ring_type];

	return (srng_config->max_size / srng_config->entry_size);
}

void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
				struct hal_srng_params *params)
{
	params->ring_base_paddr = srng->ring_base_paddr;
	params->ring_base_vaddr = srng->ring_base_vaddr;
	params->num_entries = srng->num_entries;
	params->intr_timer_thres_us = srng->intr_timer_thres_us;
	params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	params->low_threshold = srng->u.src_ring.low_threshold;
	params->flags = srng->flags;
}

dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.src_ring.hp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
	else
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.dst_ring.hp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
}

dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.src_ring.tp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
	else
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.dst_ring.tp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
}

u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
	switch (type) {
	case HAL_CE_DESC_SRC:
		return sizeof(struct hal_ce_srng_src_desc);
	case HAL_CE_DESC_DST:
		return sizeof(struct hal_ce_srng_dest_desc);
	case HAL_CE_DESC_DST_STATUS:
		return sizeof(struct hal_ce_srng_dst_status_desc);
	}

	return 0;
}

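/* Fill a CE source descriptor: the low 32 address bits go into
 * buffer_addr_low; the remaining address bits, byte-swap flag, gather bit
 * and length are packed into buffer_addr_info; the transfer id goes into
 * meta_info.
 */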
void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
				u8 byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
			   byte_swap_data) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}

void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
{
	struct hal_ce_srng_dest_desc *desc =
		(struct hal_ce_srng_dest_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
}

u32 ath11k_hal_ce_dst_status_get_length(void *buf)
{
	struct hal_ce_srng_dst_status_desc *desc =
		(struct hal_ce_srng_dst_status_desc *)buf;
	u32 len;

	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;

	return len;
}

void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
				   dma_addr_t paddr)
{
	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
					       (paddr & HAL_ADDR_LSB_REG_MASK));
	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
}

u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);

	return NULL;
}

u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *srng->u.dst_ring.hp_addr;
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}

/* Returns number of available entries in src ring */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *srng->u.src_ring.tp_addr;
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;
	u32 next_hp;

	lockdep_assert_held(&srng->lock);

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
				   struct hal_srng *srng)
{
	u32 *desc;
	u32 next_reap_hp;

	lockdep_assert_held(&srng->lock);

	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		       srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + next_reap_hp;
	srng->u.src_ring.reap_hp = next_reap_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}

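/* Illustrative access sequence for a destination ring (a sketch only; the
 * real callers elsewhere in the driver do their own descriptor handling and
 * error checking, and process() here is a hypothetical helper):
 *
 *	spin_lock_bh(&srng->lock);
 *	ath11k_hal_srng_access_begin(ab, srng);
 *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
 *		process(desc);
 *	ath11k_hal_srng_access_end(ab, srng);
 *	spin_unlock_bh(&srng->lock);
 */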
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	else
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
}

/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
 * should have been called before this.
 */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	srng->timestamp = jiffies;
}

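/* Chain the WBM idle link scatter buffers together (the link entry at the
 * end of each buffer holds the address of the next buffer) and program the
 * WBM registers that describe the scattered idle list, including its head
 * and tail pointers.
 */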
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
			FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
			    HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
				      reg_scatter_buf_sz * nsbufs));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_LSB,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_MSB,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[nsbufs - 1].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[nsbufs - 1].paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
				      (end_offset >> 2)));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
				      0));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
			   2 * tot_link_desc);

	/* Enable the SRNG */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
}

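/* Set up a SRNG: derive the ring ID, initialize the software state in
 * hal->srng_list, point the shadow HP/TP slots at the rdp/wrp areas and,
 * for non-LMAC rings, program the hardware registers. Returns the ring ID
 * on success or a negative error code.
 */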
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &ab->hal;
	const struct hal_srng_config *srng_config = &hw_srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 lmac_idx;
	int i;
	u32 reg_base;

	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
		params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);

	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->lmac_ring) {
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			srng->u.src_ring.hp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
							    lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			srng->u.dst_ring.tp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base +
					(HAL_REO1_RING_TP - HAL_REO1_RING_HP));
		}
	}

	if (srng_config->lmac_ring)
		return ring_id;

	ath11k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}

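/* Reset the HAL state and allocate the shared rdp/wrp pointer areas; this
 * must happen before any call to ath11k_hal_srng_setup(), which indexes
 * into them.
 */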
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ret;

	memset(hal, 0, sizeof(*hal));

	hal->srng_config = hw_srng_config;

	ret = ath11k_hal_alloc_cont_rdp(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_wrp(ab);
	if (ret)
		goto err_free_cont_rdp;

	return 0;

err_free_cont_rdp:
	ath11k_hal_free_cont_rdp(ab);

err_hal:
	return ret;
}

void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
	ath11k_hal_free_cont_rdp(ab);
	ath11k_hal_free_cont_wrp(ab);
}

void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
	struct hal_srng *srng;
	struct ath11k_ext_irq_grp *irq_grp;
	struct ath11k_ce_pipe *ce_pipe;
	int i;

	ath11k_err(ab, "Last interrupt received for each CE:\n");
	for (i = 0; i < CE_COUNT; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
			continue;

		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
			   i, ce_pipe->pipe_num,
			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
	}

	ath11k_err(ab, "\nLast interrupt received for each group:\n");
	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		ath11k_err(ab, "group_id %d %ums before\n",
			   irq_grp->grp_id,
			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
	}

	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
		srng = &ab->hal.srng_list[i];

		if (!srng->initialized)
			continue;

		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
			ath11k_err(ab,
				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.src_ring.hp,
				   srng->u.src_ring.reap_hp,
				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
				   srng->u.src_ring.last_tp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
			ath11k_err(ab,
				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.dst_ring.tp,
				   *srng->u.dst_ring.hp_addr,
				   srng->u.dst_ring.cached_hp,
				   srng->u.dst_ring.last_hp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
	}
}