// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */

#include <linux/kernel.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "ste_dma40_ll.h"

/*
 * Translate a generic dmaengine bus width into the DMA40 element-size
 * (ESIZE) register encoding. Any width other than 1, 2 or 8 bytes falls
 * through to the 32-bit encoding.
 */
static u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		return STEDMA40_ESIZE_8_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		return STEDMA40_ESIZE_16_BIT;
	else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return STEDMA40_ESIZE_64_BIT;
	else
		return STEDMA40_ESIZE_32_BIT;
}

/*
 * Sets up proper LCSP1 and LCSP3 register for a logical channel.
 *
 * @cfg:   channel configuration (direction, per-side psize/data width)
 * @lcsp1: out parameter, receives the assembled LCSP1 (source) value
 * @lcsp3: out parameter, receives the assembled LCSP3 (destination) value
 *
 * The source side is described by LCSP1 and the destination side by
 * LCSP3; both get address-increment and master-port bits depending on
 * the transfer direction, plus error-interrupt (EIM), burst size
 * (PSIZE) and element size (ESIZE) fields.
 */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
		 u32 *lcsp1, u32 *lcsp3)
{
	u32 l3 = 0; /* dst */
	u32 l1 = 0; /* src */

	/* src is mem? -> increase address pos */
	if (cfg->dir == DMA_MEM_TO_DEV ||
	    cfg->dir == DMA_MEM_TO_MEM)
		l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);

	/* dst is mem? -> increase address pos */
	if (cfg->dir == DMA_DEV_TO_MEM ||
	    cfg->dir == DMA_MEM_TO_MEM)
		l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);

	/* src is hw? -> master port 1 */
	if (cfg->dir == DMA_DEV_TO_MEM ||
	    cfg->dir == DMA_DEV_TO_DEV)
		l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);

	/* dst is hw? -> master port 1 */
	if (cfg->dir == DMA_MEM_TO_DEV ||
	    cfg->dir == DMA_DEV_TO_DEV)
		l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);

	/* Destination: error interrupt, burst size and element size */
	l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
	l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
	l3 |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_MEM_LCSP3_DCFG_ESIZE_POS;

	/* Source: error interrupt, burst size and element size */
	l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
	l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
	l1 |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_MEM_LCSP1_SCFG_ESIZE_POS;

	*lcsp1 = l1;
	*lcsp3 = l3;

}

/*
 * Assemble the source and destination configuration register values
 * for a physical channel.
 *
 * @cfg:     channel configuration (direction, device type, flow control,
 *           psize, data width, priority, endianness)
 * @src_cfg: out parameter, receives the source-side register value
 * @dst_cfg: out parameter, receives the destination-side register value
 *
 * Whichever side talks to a hardware device gets master port 1, the
 * event line derived from cfg->dev_type, and a transfer-mode field that
 * depends on whether hardware flow control is used.
 */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
{
	u32 src = 0;
	u32 dst = 0;

	/* src is a hw device? */
	if ((cfg->dir == DMA_DEV_TO_MEM) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
		/* Set master port to 1 */
		src |= BIT(D40_SREG_CFG_MST_POS);
		src |= D40_TYPE_TO_EVENT(cfg->dev_type);

		/* Transfer mode: single value vs. both bits with flow control */
		if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
			src |= BIT(D40_SREG_CFG_PHY_TM_POS);
		else
			src |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
	/* dst is a hw device? */
	if ((cfg->dir == DMA_MEM_TO_DEV) ||
	    (cfg->dir == DMA_DEV_TO_DEV)) {
		/* Set master port to 1 */
		dst |= BIT(D40_SREG_CFG_MST_POS);
		dst |= D40_TYPE_TO_EVENT(cfg->dev_type);

		/* Transfer mode: single value vs. both bits with flow control */
		if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
			dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
		else
			dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
	}
	/* Interrupt on end of transfer for destination */
	dst |= BIT(D40_SREG_CFG_TIM_POS);

	/* Generate interrupt on error */
	src |= BIT(D40_SREG_CFG_EIM_POS);
	dst |= BIT(D40_SREG_CFG_EIM_POS);

	/* PSIZE: enable packing only for bursts larger than one element */
	if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
		src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
		src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
	}
	if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
		dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
		dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
	}

	/* Element size */
	src |= d40_width_to_bits(cfg->src_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;
	dst |= d40_width_to_bits(cfg->dst_info.data_width)
		<< D40_SREG_CFG_ESIZE_POS;

	/* Set the priority bit to high for the physical channel */
	if (cfg->high_priority) {
		src |= BIT(D40_SREG_CFG_PRI_POS);
		dst |= BIT(D40_SREG_CFG_PRI_POS);
	}

	/* Little/big endian bit per side */
	if (cfg->src_info.big_endian)
		src |= BIT(D40_SREG_CFG_LBE_POS);
	if (cfg->dst_info.big_endian)
		dst |= BIT(D40_SREG_CFG_LBE_POS);

	*src_cfg = src;
	*dst_cfg = dst;
}

/*
 * Fill in one physical-channel linked-list item (LLI).
 *
 * @lli:       the LLI to fill in
 * @data:      bus address of this item's data
 * @data_size: size of the transfer in bytes
 * @next_lli:  bus address of the next LLI, or 0 if this is the last one
 * @reg_cfg:   channel configuration register value (from d40_phy_cfg())
 * @info:      per-side channel info supplying data width and psize
 * @flags:     LLI_ADDR_INC and/or LLI_TERM_INT
 *
 * Returns 0 on success, -EINVAL if the address is misaligned or the
 * transfer is smaller than one full burst.
 */
static int d40_phy_fill_lli(struct d40_phy_lli *lli,
			    dma_addr_t data,
			    u32 data_size,
			    dma_addr_t next_lli,
			    u32 reg_cfg,
			    struct stedma40_half_channel_info *info,
			    unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
	unsigned int data_width = info->data_width;
	int psize = info->psize;
	int num_elems;

	/* Burst length in elements: 1, or 2 << psize for packed bursts */
	if (psize == STEDMA40_PSIZE_PHY_1)
		num_elems = 1;
	else
		num_elems = 2 << psize;

	/* Must be aligned to the element width */
	if (!IS_ALIGNED(data, data_width))
		return -EINVAL;

	/* Transfer size can't be smaller than (num_elms * elem_size) */
	if (data_size < num_elems * data_width)
		return -EINVAL;

	/* The number of elements, i.e. how many chunks of data_width bytes */
	lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;

	/*
	 * Distance to next element sized entry.
	 * Usually the size of the element unless you want gaps.
	 */
	if (addr_inc)
		lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;

	/* Where the data is */
	lli->reg_ptr = data;
	lli->reg_cfg = reg_cfg;

	/* If this scatter list entry is the last one, no next link */
	if (next_lli == 0)
		lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
	else
		lli->reg_lnk = next_lli;

	/* Set/clear interrupt generation on this link item.*/
	if (term_int)
		lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
	else
		lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);

	/*
	 * Post link - D40_SREG_LNK_PHY_PRE_POS = 0
	 * Relink happens after transfer completion.
	 */

	return 0;
}

/*
 * Compute how many bytes of @size can go into one LLI segment, given
 * the data widths of both sides of the transfer.
 *
 * The segment limit is the largest multiple of both widths that still
 * fits in STEDMA40_MAX_SEG_SIZE elements of the narrower side. A size
 * that needs exactly two segments is split evenly (aligned to the wider
 * width) so the two halves are balanced; anything larger just takes the
 * maximum per call.
 */
static int d40_seg_size(int size, int data_width1, int data_width2)
{
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	/* ALIGN rounds up; step back one wide element if we overshot */
	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;

	if (size <= seg_max)
		return size;

	if (size <= 2 * seg_max)
		return ALIGN(size / 2, max_w);

	return seg_max;
}

/*
 * Expand one contiguous buffer into as many physical LLIs as needed
 * (each at most d40_seg_size() bytes).
 *
 * @lli:        first LLI to fill in; they are filled consecutively
 * @addr:       bus address of the buffer
 * @size:       buffer size in bytes
 * @lli_phys:   bus address of @lli itself (used to compute link targets)
 * @first_phys: bus address of the first LLI in the whole chain, used as
 *              the link target of the final item for cyclic transfers
 * @reg_cfg:    channel configuration register value
 * @info:       channel info for this side of the transfer
 * @otherinfo:  channel info for the opposite side (its width also
 *              constrains the segment size)
 * @flags:      LLI_LAST_LINK, LLI_ADDR_INC, LLI_TERM_INT, LLI_CYCLIC
 *
 * Returns a pointer just past the last LLI written, or NULL on error.
 */
static struct d40_phy_lli *
d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
		   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
		   struct stedma40_half_channel_info *info,
		   struct stedma40_half_channel_info *otherinfo,
		   unsigned long flags)
{
	bool lastlink = flags & LLI_LAST_LINK;
	bool addr_inc = flags & LLI_ADDR_INC;
	bool term_int = flags & LLI_TERM_INT;
	bool cyclic = flags & LLI_CYCLIC;
	int err;
	dma_addr_t next = lli_phys;
	int size_rest = size;
	int size_seg = 0;

	/*
	 * This piece may be split up based on d40_seg_size(); we only want the
	 * term int on the last part.
	 */
	if (term_int)
		flags &= ~LLI_TERM_INT;

	do {
		size_seg = d40_seg_size(size_rest, info->data_width,
					otherinfo->data_width);
		size_rest -= size_seg;

		/* Restore the interrupt flag on the final segment only */
		if (size_rest == 0 && term_int)
			flags |= LLI_TERM_INT;

		/*
		 * Last segment of the last buffer: either wrap back to the
		 * head (cyclic) or terminate the chain (next == 0). Otherwise
		 * link to the next, suitably aligned, LLI slot.
		 */
		if (size_rest == 0 && lastlink)
			next = cyclic ? first_phys : 0;
		else
			next = ALIGN(next + sizeof(struct d40_phy_lli),
				     D40_LLI_ALIGN);

		err = d40_phy_fill_lli(lli, addr, size_seg, next,
				       reg_cfg, info, flags);

		if (err)
			goto err;

		lli++;
		if (addr_inc)
			addr += size_seg;
	} while (size_rest);

	return lli;

 err:
	return NULL;
}

/*
 * Convert a scatterlist into a chain of physical-channel LLIs.
 *
 * @sg:        the scatterlist to convert
 * @sg_len:    number of entries in @sg
 * @target:    fixed device address, or 0 for memory-to-memory (in which
 *             case the address is taken from each sg entry and
 *             incremented as the transfer progresses)
 * @lli_sg:    CPU-visible array of LLIs to fill in
 * @lli_phys:  bus address of @lli_sg
 * @reg_cfg:   channel configuration register value
 * @info:      channel info for this side of the transfer
 * @otherinfo: channel info for the opposite side
 * @flags:     LLI_* flags; LLI_TERM_INT and LLI_LAST_LINK are added
 *             automatically for the final sg entry
 *
 * Returns the total number of bytes mapped, or -EINVAL on error.
 */
int d40_phy_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t target,
		      struct d40_phy_lli *lli_sg,
		      dma_addr_t lli_phys,
		      u32 reg_cfg,
		      struct stedma40_half_channel_info *info,
		      struct stedma40_half_channel_info *otherinfo,
		      unsigned long flags)
{
	int total_size = 0;
	int i;
	struct scatterlist *current_sg = sg;
	struct d40_phy_lli *lli = lli_sg;
	dma_addr_t l_phys = lli_phys;

	/* No fixed device address means memory on this side */
	if (!target)
		flags |= LLI_ADDR_INC;

	for_each_sg(sg, current_sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t dst = target ?: sg_addr;

		total_size += sg_dma_len(current_sg);

		/* Interrupt and chain termination on the last entry */
		if (i == sg_len - 1)
			flags |= LLI_TERM_INT | LLI_LAST_LINK;

		/* Bus address of the next LLI slot to hand out */
		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);

		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
					 reg_cfg, info, otherinfo, flags);

		if (lli == NULL)
			return -EINVAL;
	}

	return total_size;
}


/* DMA logical lli operations */

/*
 * Link a src/dst pair of logical LLIs to the link position @next, and
 * optionally request a terminal-count interrupt on the destination.
 *
 * @next is the index of the next link position, or -EINVAL for "no
 * next link". Link positions hold src/dst pairs, hence the *2 / *2+1
 * encoding of the source and destination link offsets (SLOS/DLOS).
 */
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
			     struct d40_log_lli *lli_src,
			     int next, unsigned int flags)
{
	bool interrupt = flags & LLI_TERM_INT;
	u32 slos = 0;
	u32 dlos = 0;

	if (next != -EINVAL) {
		slos = next * 2;
		dlos = next * 2 + 1;
	}

	if (interrupt) {
		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
	}

	lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(slos << D40_MEM_LCSP1_SLOS_POS);

	lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
		(dlos << D40_MEM_LCSP1_SLOS_POS);
}

/*
 * Link a src/dst LLI pair and write it out to the LCPA (logical
 * channel parameter area), i.e. the channel's active parameter slot.
 * writel_relaxed() is used since this is memory-like I/O with no
 * ordering requirement against other I/O here.
 */
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next, unsigned int flags)
{
	d40_log_lli_link(lli_dst, lli_src, next, flags);

	writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
	writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
	writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
	writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
}

/*
 * Link a src/dst LLI pair and write it out to a slot pair in the LCLA
 * (logical channel link address) area: source at index 0, destination
 * at index 1 of the given slot.
 */
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
			    struct d40_log_lli *lli_dst,
			    struct d40_log_lli *lli_src,
			    int next, unsigned int flags)
{
	d40_log_lli_link(lli_dst, lli_src, next, flags);

	writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
	writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
	writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
	writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
}

/*
 * Fill in one logical-channel LLI (one side of a src/dst pair).
 *
 * @lli:        the LLI to fill in
 * @data:       bus address of the data
 * @data_size:  transfer size in bytes
 * @reg_cfg:    LCSP1/LCSP3 configuration value (from d40_log_cfg())
 * @data_width: element width in bytes
 * @flags:      LLI_ADDR_INC to advance the address per element
 */
static void d40_log_fill_lli(struct d40_log_lli *lli,
			     dma_addr_t data, u32 data_size,
			     u32 reg_cfg,
			     u32 data_width,
			     unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;

	lli->lcsp13 = reg_cfg;

	/* The number of elements to transfer */
	lli->lcsp02 = ((data_size / data_width) <<
		       D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;

	/* Caller (d40_seg_size()) must keep the element count in range */
	BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);

	/* 16 LSBs address of the current element */
	lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
	/* 16 MSBs address of the current element */
	lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;

	if (addr_inc)
		lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;

}

/*
 * Expand one contiguous buffer into as many logical LLIs as needed
 * (each at most d40_seg_size() bytes).
 *
 * @lli_sg:      first LLI to fill in; they are filled consecutively
 * @addr:        bus address of the buffer
 * @size:        buffer size in bytes
 * @lcsp13:      LCSP1 (src) or LCSP3 (dst) configuration value
 * @data_width1: element width of this side of the transfer
 * @data_width2: element width of the opposite side
 * @flags:       LLI_ADDR_INC to advance the address between segments
 *
 * Returns a pointer just past the last LLI written.
 */
static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
					      dma_addr_t addr,
					      int size,
					      u32 lcsp13, /* src or dst*/
					      u32 data_width1,
					      u32 data_width2,
					      unsigned int flags)
{
	bool addr_inc = flags & LLI_ADDR_INC;
	struct d40_log_lli *lli = lli_sg;
	int size_rest = size;
	int size_seg = 0;

	do {
		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
		size_rest -= size_seg;

		d40_log_fill_lli(lli,
				 addr,
				 size_seg,
				 lcsp13, data_width1,
				 flags);
		if (addr_inc)
			addr += size_seg;
		lli++;
	} while (size_rest);

	return lli;
}

/*
 * Convert a scatterlist into a chain of logical-channel LLIs.
 *
 * @sg:          the scatterlist to convert
 * @sg_len:      number of entries in @sg
 * @dev_addr:    fixed device address, or 0 for memory (address taken
 *               from each sg entry and incremented)
 * @lli_sg:      array of LLIs to fill in
 * @lcsp13:      LCSP1 (src) or LCSP3 (dst) configuration value
 * @data_width1: element width of this side of the transfer
 * @data_width2: element width of the opposite side
 *
 * Returns the total number of bytes mapped.
 */
int d40_log_sg_to_lli(struct scatterlist *sg,
		      int sg_len,
		      dma_addr_t dev_addr,
		      struct d40_log_lli *lli_sg,
		      u32 lcsp13, /* src or dst*/
		      u32 data_width1, u32 data_width2)
{
	int total_size = 0;
	struct scatterlist *current_sg = sg;
	int i;
	struct d40_log_lli *lli = lli_sg;
	unsigned long flags = 0;

	/* No fixed device address means memory on this side */
	if (!dev_addr)
		flags |= LLI_ADDR_INC;

	for_each_sg(sg, current_sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(current_sg);
		unsigned int len = sg_dma_len(current_sg);
		dma_addr_t addr = dev_addr ?: sg_addr;

		total_size += sg_dma_len(current_sg);

		lli = d40_log_buf_to_lli(lli, addr, len,
					 lcsp13,
					 data_width1,
					 data_width2,
					 flags);
	}

	return total_size;
}