/*
 * Copyright 2008-2015 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "fman.h"
#include "fman_muram.h"

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/libfdt_env.h>

/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
#define MAX_NUM_OF_MACS 10
#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
#define BASE_RX_PORTID 0x08
#define BASE_TX_PORTID 0x28

/* Modules registers offsets */
#define BMI_OFFSET 0x00080000
#define QMI_OFFSET 0x00080400
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
#define CGP_OFFSET 0x000DB000

/* Exceptions bit map */
#define EX_DMA_BUS_ERROR 0x80000000
#define EX_DMA_READ_ECC 0x40000000
#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
#define EX_DMA_FM_WRITE_ECC 0x10000000
#define EX_FPM_STALL_ON_TASKS 0x08000000
#define EX_FPM_SINGLE_ECC 0x04000000
#define EX_FPM_DOUBLE_ECC 0x02000000
#define EX_QMI_SINGLE_ECC 0x01000000
#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
#define EX_QMI_DOUBLE_ECC 0x00400000
#define EX_BMI_LIST_RAM_ECC 0x00200000
#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
#define EX_IRAM_ECC 0x00040000
#define EX_MURAM_ECC 0x00020000
#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
#define EX_DMA_SINGLE_PORT_ECC 0x00008000

/* DMA defines */
/* masks */
#define DMA_MODE_BER 0x00200000
#define DMA_MODE_ECC 0x00000020
#define DMA_MODE_SECURE_PROT 0x00000800
#define DMA_MODE_AXI_DBG_MASK 0x0F000000

#define DMA_TRANSFER_PORTID_MASK 0xFF000000
#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
#define DMA_TRANSFER_LIODN_MASK 0x00000FFF

#define DMA_STATUS_BUS_ERR 0x08000000
#define DMA_STATUS_READ_ECC 0x04000000
#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
#define DMA_STATUS_FM_WRITE_ECC 0x01000000
#define DMA_STATUS_FM_SPDAT_ECC 0x00080000

#define DMA_MODE_CACHE_OR_SHIFT 30
#define DMA_MODE_AXI_DBG_SHIFT 24
#define DMA_MODE_CEN_SHIFT 13
#define DMA_MODE_CEN_MASK 0x00000007
#define DMA_MODE_DBG_SHIFT 7
#define DMA_MODE_AID_MODE_SHIFT 4

#define DMA_THRESH_COMMQ_SHIFT 24
#define DMA_THRESH_READ_INT_BUF_SHIFT 16
#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f

#define DMA_TRANSFER_PORTID_SHIFT 24
#define DMA_TRANSFER_TNUM_SHIFT 16

#define DMA_CAM_SIZEOF_ENTRY 0x40
#define DMA_CAM_UNITS 8

#define DMA_LIODN_SHIFT 16
#define DMA_LIODN_BASE_MASK 0x00000FFF

/* FPM defines */
#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
#define FPM_EV_MASK_STALL 0x40000000
#define FPM_EV_MASK_SINGLE_ECC 0x20000000
#define FPM_EV_MASK_RELEASE_FM 0x00010000
#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
#define FPM_EV_MASK_STALL_EN 0x00004000
#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004

#define FPM_RAM_MURAM_ECC 0x00008000
#define FPM_RAM_IRAM_ECC 0x00004000
#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
#define FPM_RAM_IRAM_ECC_EN 0x40000000
#define FPM_RAM_RAMS_ECC_EN 0x80000000
#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000

#define FPM_REV1_MAJOR_MASK 0x0000FF00
#define FPM_REV1_MINOR_MASK 0x000000FF

#define FPM_DISP_LIMIT_SHIFT 24

#define FPM_PRT_FM_CTL1 0x00000001
#define FPM_PRT_FM_CTL2 0x00000002
#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16

#define FPM_THR1_PRS_SHIFT 24
#define FPM_THR1_KG_SHIFT 16
#define FPM_THR1_PLCR_SHIFT 8
#define FPM_THR1_BMI_SHIFT 0

#define FPM_THR2_QMI_ENQ_SHIFT 24
#define FPM_THR2_QMI_DEQ_SHIFT 0
#define FPM_THR2_FM_CTL1_SHIFT 16
#define FPM_THR2_FM_CTL2_SHIFT 8

#define FPM_EV_MASK_CAT_ERR_SHIFT 1
#define FPM_EV_MASK_DMA_ERR_SHIFT 0

#define FPM_REV1_MAJOR_SHIFT 8

#define FPM_RSTC_FM_RESET 0x80000000
#define FPM_RSTC_MAC0_RESET 0x40000000
#define FPM_RSTC_MAC1_RESET 0x20000000
#define FPM_RSTC_MAC2_RESET 0x10000000
#define FPM_RSTC_MAC3_RESET 0x08000000
#define FPM_RSTC_MAC8_RESET 0x04000000
#define FPM_RSTC_MAC4_RESET 0x02000000
#define FPM_RSTC_MAC5_RESET 0x01000000
#define FPM_RSTC_MAC6_RESET 0x00800000
#define FPM_RSTC_MAC7_RESET 0x00400000
#define FPM_RSTC_MAC9_RESET 0x00200000

#define FPM_TS_INT_SHIFT 16
#define FPM_TS_CTL_EN 0x80000000

/* BMI defines */
#define BMI_INIT_START 0x80000000
#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
#define BMI_NUM_OF_TASKS_MASK 0x3F000000
#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
#define BMI_NUM_OF_DMAS_MASK 0x00000F00
#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
#define BMI_FIFO_SIZE_MASK 0x000003FF
#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
#define BMI_CFG2_DMAS_MASK 0x0000003F
#define BMI_CFG2_TASKS_MASK 0x0000003F

#define BMI_CFG2_TASKS_SHIFT 16
#define BMI_CFG2_DMAS_SHIFT 0
#define BMI_CFG1_FIFO_SIZE_SHIFT 16
#define BMI_NUM_OF_TASKS_SHIFT 24
#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
#define BMI_NUM_OF_DMAS_SHIFT 8
#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0

#define BMI_FIFO_ALIGN 0x100

#define BMI_EXTRA_FIFO_SIZE_SHIFT 16

/* QMI defines */
#define QMI_CFG_ENQ_EN 0x80000000
#define QMI_CFG_DEQ_EN 0x40000000
#define QMI_CFG_EN_COUNTERS 0x10000000
#define QMI_CFG_DEQ_MASK 0x0000003F
#define QMI_CFG_ENQ_MASK 0x00003F00
#define QMI_CFG_ENQ_SHIFT 8

#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
#define QMI_INTR_EN_SINGLE_ECC 0x80000000

#define QMI_GS_HALT_NOT_BUSY 0x00000002

/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000

/* Default values */
#define DEFAULT_CATASTROPHIC_ERR 0
#define DEFAULT_DMA_ERR 0
#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
#define DEFAULT_DMA_COMM_Q_LOW 0x2A
#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
#define DEFAULT_CACHE_OVERRIDE 0
#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
#define DEFAULT_DMA_DBG_CNT_MODE 0
#define DEFAULT_DMA_SOS_EMERGENCY 0
#define DEFAULT_DMA_WATCHDOG 0
#define DEFAULT_DISP_LIMIT 0
#define DEFAULT_PRS_DISP_TH 16
#define DEFAULT_PLCR_DISP_TH 16
#define DEFAULT_KG_DISP_TH 16
#define DEFAULT_BMI_DISP_TH 16
#define DEFAULT_QMI_ENQ_DISP_TH 16
#define DEFAULT_QMI_DEQ_DISP_TH 16
#define DEFAULT_FM_CTL1_DISP_TH 16
#define DEFAULT_FM_CTL2_DISP_TH 16

#define DFLT_AXI_DBG_NUM_OF_BEATS 1

#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
        ((dma_thresh_max_buf + 1) / 2)
#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
        ((dma_thresh_max_buf + 1) * 3 / 4)
#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
        ((dma_thresh_max_buf + 1) / 2)
#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf) \
        ((dma_thresh_max_buf + 1) * 3 / 4)

#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
        ((dma_thresh_max_commq + 1) / 2)
#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
        ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
        DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))

#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
        ((dma_thresh_max_commq + 1) * 3 / 4)
#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
        ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
        DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))

#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
        ((major == 6) ? ((minor == 1 || minor == 4) ? \
        TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
        bmi_max_num_of_tasks)

#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
        (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
        DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)

#define FM_TIMESTAMP_1_USEC_BIT 8
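
/* Worked examples for the macros above (illustrative only): all
 * supported revisions set dma_thresh_max_buf = 127 (see
 * fill_soc_specific_params() below), so the internal read/write buffer
 * thresholds default to (127 + 1) / 2 = 64 (clear) and
 * (127 + 1) * 3 / 4 = 96 (assert). An FManV2 (major != 6) with
 * dma_thresh_max_commq = 31 gets command-queue thresholds
 * (31 + 1) / 2 = 16 and (31 + 1) * 3 / 4 = 24, while FManV3 uses the
 * fixed 0x2A/0x3f values. Likewise, DFLT_TOTAL_NUM_OF_TASKS(6, 1, x)
 * evaluates to 59 (V3L) and DFLT_TOTAL_NUM_OF_TASKS(6, 0, x) to 124
 * (V3H).
 */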

/* Defines used for enabling/disabling FMan interrupts */
#define ERR_INTR_EN_DMA 0x00010000
#define ERR_INTR_EN_FPM 0x80000000
#define ERR_INTR_EN_BMI 0x00800000
#define ERR_INTR_EN_QMI 0x00400000
#define ERR_INTR_EN_MURAM 0x00040000
#define ERR_INTR_EN_MAC0 0x00004000
#define ERR_INTR_EN_MAC1 0x00002000
#define ERR_INTR_EN_MAC2 0x00001000
#define ERR_INTR_EN_MAC3 0x00000800
#define ERR_INTR_EN_MAC4 0x00000400
#define ERR_INTR_EN_MAC5 0x00000200
#define ERR_INTR_EN_MAC6 0x00000100
#define ERR_INTR_EN_MAC7 0x00000080
#define ERR_INTR_EN_MAC8 0x00008000
#define ERR_INTR_EN_MAC9 0x00000040

#define INTR_EN_QMI 0x40000000
#define INTR_EN_MAC0 0x00080000
#define INTR_EN_MAC1 0x00040000
#define INTR_EN_MAC2 0x00020000
#define INTR_EN_MAC3 0x00010000
#define INTR_EN_MAC4 0x00000040
#define INTR_EN_MAC5 0x00000020
#define INTR_EN_MAC6 0x00000008
#define INTR_EN_MAC7 0x00000002
#define INTR_EN_MAC8 0x00200000
#define INTR_EN_MAC9 0x00100000
#define INTR_EN_REV0 0x00008000
#define INTR_EN_REV1 0x00004000
#define INTR_EN_REV2 0x00002000
#define INTR_EN_REV3 0x00001000
#define INTR_EN_TMR 0x01000000

enum fman_dma_aid_mode {
        FMAN_DMA_AID_OUT_PORT_ID = 0,   /* 4 LSB of PORT_ID */
        FMAN_DMA_AID_OUT_TNUM           /* 4 LSB of TNUM */
};

struct fman_iram_regs {
        u32 iadd;       /* FM IRAM instruction address register */
        u32 idata;      /* FM IRAM instruction data register */
        u32 itcfg;      /* FM IRAM timing config register */
        u32 iready;     /* FM IRAM ready register */
};

struct fman_fpm_regs {
        u32 fmfp_tnc;           /* FPM TNUM Control 0x00 */
        u32 fmfp_prc;           /* FPM Port_ID FmCtl Association 0x04 */
        u32 fmfp_brkc;          /* FPM Breakpoint Control 0x08 */
        u32 fmfp_mxd;           /* FPM Flush Control 0x0c */
        u32 fmfp_dist1;         /* FPM Dispatch Thresholds1 0x10 */
        u32 fmfp_dist2;         /* FPM Dispatch Thresholds2 0x14 */
        u32 fm_epi;             /* FM Error Pending Interrupts 0x18 */
        u32 fm_rie;             /* FM Error Interrupt Enable 0x1c */
        u32 fmfp_fcev[4];       /* FPM FMan-Controller Event 1-4 0x20-0x2f */
        u32 res0030[4];         /* res 0x30 - 0x3f */
        u32 fmfp_cee[4];        /* FPM FMan-Controller Event 1-4 0x40-0x4f */
        u32 res0050[4];         /* res 0x50-0x5f */
        u32 fmfp_tsc1;          /* FPM TimeStamp Control1 0x60 */
        u32 fmfp_tsc2;          /* FPM TimeStamp Control2 0x64 */
        u32 fmfp_tsp;           /* FPM Time Stamp 0x68 */
        u32 fmfp_tsf;           /* FPM Time Stamp Fraction 0x6c */
        u32 fm_rcr;             /* FM Rams Control 0x70 */
        u32 fmfp_extc;          /* FPM External Requests Control 0x74 */
        u32 fmfp_ext1;          /* FPM External Requests Config1 0x78 */
        u32 fmfp_ext2;          /* FPM External Requests Config2 0x7c */
        u32 fmfp_drd[16];       /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
        u32 fmfp_dra;           /* FPM Data Ram Access 0xc0 */
        u32 fm_ip_rev_1;        /* FM IP Block Revision 1 0xc4 */
        u32 fm_ip_rev_2;        /* FM IP Block Revision 2 0xc8 */
        u32 fm_rstc;            /* FM Reset Command 0xcc */
        u32 fm_cld;             /* FM Classifier Debug 0xd0 */
        u32 fm_npi;             /* FM Normal Pending Interrupts 0xd4 */
        u32 fmfp_exte;          /* FPM External Requests Enable 0xd8 */
        u32 fmfp_ee;            /* FPM Event&Mask 0xdc */
        u32 fmfp_cev[4];        /* FPM CPU Event 1-4 0xe0-0xef */
        u32 res00f0[4];         /* res 0xf0-0xff */
        u32 fmfp_ps[50];        /* FPM Port Status 0x100-0x1c7 */
        u32 res01c8[14];        /* res 0x1c8-0x1ff */
        u32 fmfp_clfabc;        /* FPM CLFABC 0x200 */
        u32 fmfp_clfcc;         /* FPM CLFCC 0x204 */
        u32 fmfp_clfaval;       /* FPM CLFAVAL 0x208 */
        u32 fmfp_clfbval;       /* FPM CLFBVAL 0x20c */
        u32 fmfp_clfcval;       /* FPM CLFCVAL 0x210 */
        u32 fmfp_clfamsk;       /* FPM CLFAMSK 0x214 */
        u32 fmfp_clfbmsk;       /* FPM CLFBMSK 0x218 */
        u32 fmfp_clfcmsk;       /* FPM CLFCMSK 0x21c */
        u32 fmfp_clfamc;        /* FPM CLFAMC 0x220 */
        u32 fmfp_clfbmc;        /* FPM CLFBMC 0x224 */
        u32 fmfp_clfcmc;        /* FPM CLFCMC 0x228 */
        u32 fmfp_decceh;        /* FPM DECCEH 0x22c */
        u32 res0230[116];       /* res 0x230 - 0x3ff */
        u32 fmfp_ts[128];       /* FPM Task Status 0x400 - 0x5ff */
        u32 res0600[0x400 - 384];
};

struct fman_bmi_regs {
        u32 fmbm_init;          /* BMI Initialization 0x00 */
        u32 fmbm_cfg1;          /* BMI Configuration 1 0x04 */
        u32 fmbm_cfg2;          /* BMI Configuration 2 0x08 */
        u32 res000c[5];         /* 0x0c - 0x1f */
        u32 fmbm_ievr;          /* Interrupt Event Register 0x20 */
        u32 fmbm_ier;           /* Interrupt Enable Register 0x24 */
        u32 fmbm_ifr;           /* Interrupt Force Register 0x28 */
        u32 res002c[5];         /* 0x2c - 0x3f */
        u32 fmbm_arb[8];        /* BMI Arbitration 0x40 - 0x5f */
        u32 res0060[12];        /* 0x60 - 0x8f */
        u32 fmbm_dtc[3];        /* Debug Trap Counter 0x90 - 0x9b */
        u32 res009c;            /* 0x9c */
        u32 fmbm_dcv[3][4];     /* Debug Compare val 0xa0-0xcf */
        u32 fmbm_dcm[3][4];     /* Debug Compare Mask 0xd0-0xff */
        u32 fmbm_gde;           /* BMI Global Debug Enable 0x100 */
        u32 fmbm_pp[63];        /* BMI Port Parameters 0x104 - 0x1ff */
        u32 res0200;            /* 0x200 */
        u32 fmbm_pfs[63];       /* BMI Port FIFO Size 0x204 - 0x2ff */
        u32 res0300;            /* 0x300 */
        u32 fmbm_spliodn[63];   /* Port Partition ID 0x304 - 0x3ff */
};

struct fman_qmi_regs {
        u32 fmqm_gc;            /* General Configuration Register 0x00 */
        u32 res0004;            /* 0x04 */
        u32 fmqm_eie;           /* Error Interrupt Event Register 0x08 */
        u32 fmqm_eien;          /* Error Interrupt Enable Register 0x0c */
        u32 fmqm_eif;           /* Error Interrupt Force Register 0x10 */
        u32 fmqm_ie;            /* Interrupt Event Register 0x14 */
        u32 fmqm_ien;           /* Interrupt Enable Register 0x18 */
        u32 fmqm_if;            /* Interrupt Force Register 0x1c */
        u32 fmqm_gs;            /* Global Status Register 0x20 */
        u32 fmqm_ts;            /* Task Status Register 0x24 */
        u32 fmqm_etfc;          /* Enqueue Total Frame Counter 0x28 */
        u32 fmqm_dtfc;          /* Dequeue Total Frame Counter 0x2c */
        u32 fmqm_dc0;           /* Dequeue Counter 0 0x30 */
        u32 fmqm_dc1;           /* Dequeue Counter 1 0x34 */
        u32 fmqm_dc2;           /* Dequeue Counter 2 0x38 */
        u32 fmqm_dc3;           /* Dequeue Counter 3 0x3c */
        u32 fmqm_dfdc;          /* Dequeue FQID from Default Counter 0x40 */
        u32 fmqm_dfcc;          /* Dequeue FQID from Context Counter 0x44 */
        u32 fmqm_dffc;          /* Dequeue FQID from FD Counter 0x48 */
        u32 fmqm_dcc;           /* Dequeue Confirm Counter 0x4c */
        u32 res0050[7];         /* 0x50 - 0x6b */
        u32 fmqm_tapc;          /* Tnum Aging Period Control 0x6c */
        u32 fmqm_dmcvc;         /* Dequeue MAC Command Valid Counter 0x70 */
        u32 fmqm_difdcc;        /* Dequeue Invalid FD Command Counter 0x74 */
        u32 fmqm_da1v;          /* Dequeue A1 Valid Counter 0x78 */
        u32 res007c;            /* 0x7c */
        u32 fmqm_dtc;           /* Debug Trap Counter 0x80 */
        u32 fmqm_efddd;         /* Enqueue Frame desc Dynamic dbg 0x84 */
        u32 res0088[2];         /* 0x88 - 0x8f */
        struct {
                u32 fmqm_dtcfg1;        /* Debug Trap Config 1 Register 0x00 */
                u32 fmqm_dtval1;        /* Debug Trap Value 1 Register 0x04 */
                u32 fmqm_dtm1;          /* Debug Trap Mask 1 Register 0x08 */
                u32 fmqm_dtc1;          /* Debug Trap Counter 1 Register 0x0c */
                u32 fmqm_dtcfg2;        /* Debug Trap Config 2 Register 0x10 */
                u32 fmqm_dtval2;        /* Debug Trap Value 2 Register 0x14 */
                u32 fmqm_dtm2;          /* Debug Trap Mask 2 Register 0x18 */
                u32 res001c;            /* 0x1c */
        } dbg_traps[3];                 /* 0x90 - 0xef */
        u8 res00f0[0x400 - 0xf0];       /* 0xf0 - 0x3ff */
};

struct fman_dma_regs {
        u32 fmdmsr;     /* FM DMA status register 0x00 */
        u32 fmdmmr;     /* FM DMA mode register 0x04 */
        u32 fmdmtr;     /* FM DMA bus threshold register 0x08 */
        u32 fmdmhy;     /* FM DMA bus hysteresis register 0x0c */
        u32 fmdmsetr;   /* FM DMA SOS emergency Threshold Register 0x10 */
        u32 fmdmtah;    /* FM DMA transfer bus address high reg 0x14 */
        u32 fmdmtal;    /* FM DMA transfer bus address low reg 0x18 */
        u32 fmdmtcid;   /* FM DMA transfer bus communication ID reg 0x1c */
        u32 fmdmra;     /* FM DMA bus internal ram address register 0x20 */
        u32 fmdmrd;     /* FM DMA bus internal ram data register 0x24 */
        u32 fmdmwcr;    /* FM DMA CAM watchdog counter value 0x28 */
        u32 fmdmebcr;   /* FM DMA CAM base in MURAM register 0x2c */
        u32 fmdmccqdr;  /* FM DMA CAM and CMD Queue Debug reg 0x30 */
        u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
        u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
        u32 fmdmcqvr3;  /* FM DMA CMD Queue Value register #3 0x3c */
        u32 fmdmcqvr4;  /* FM DMA CMD Queue Value register #4 0x40 */
        u32 fmdmcqvr5;  /* FM DMA CMD Queue Value register #5 0x44 */
        u32 fmdmsefrc;  /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
        u32 fmdmsqfrc;  /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
        u32 fmdmssrc;   /* FM DMA Semaphore SYNC Reject Counter 0x50 */
        u32 fmdmdcr;    /* FM DMA Debug Counter 0x54 */
        u32 fmdmemsr;   /* FM DMA Emergency Smoother Register 0x58 */
        u32 res005c;    /* 0x5c */
        u32 fmdmplr[FMAN_LIODN_TBL / 2];        /* DMA LIODN regs 0x60-0xdf */
        u32 res00e0[0x400 - 56];
};
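
/* All of the register blocks above are accessed with ioread32be() /
 * iowrite32be(): FMan registers are big-endian regardless of host
 * endianness. A minimal (illustrative) read of the IP revision:
 *
 *      u32 rev = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
 *      u8 major = (rev & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT;
 */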

/* Structure that holds current FMan state.
 * Used for saving run time information.
 */
struct fman_state_struct {
        u8 fm_id;
        u16 fm_clk_freq;
        struct fman_rev_info rev_info;
        bool enabled_time_stamp;
        u8 count1_micro_bit;
        u8 total_num_of_tasks;
        u8 accumulated_num_of_tasks;
        u32 accumulated_fifo_size;
        u8 accumulated_num_of_open_dmas;
        u8 accumulated_num_of_deq_tnums;
        u32 exceptions;
        u32 extra_fifo_pool_size;
        u8 extra_tasks_pool_size;
        u8 extra_open_dmas_pool_size;
        u16 port_mfl[MAX_NUM_OF_MACS];
        u16 mac_mfl[MAX_NUM_OF_MACS];

        /* SOC specific */
        u32 fm_iram_size;
        /* DMA */
        u32 dma_thresh_max_commq;
        u32 dma_thresh_max_buf;
        u32 max_num_of_open_dmas;
        /* QMI */
        u32 qmi_max_num_of_tnums;
        u32 qmi_def_tnums_thresh;
        /* BMI */
        u32 bmi_max_num_of_tasks;
        u32 bmi_max_fifo_size;
        /* General */
        u32 fm_port_num_of_cg;
        u32 num_of_rx_ports;
        u32 total_fifo_size;

        u32 qman_channel_base;
        u32 num_of_qman_channels;

        struct resource *res;
};

/* Structure that holds FMan initial configuration */
struct fman_cfg {
        u8 disp_limit_tsh;
        u8 prs_disp_tsh;
        u8 plcr_disp_tsh;
        u8 kg_disp_tsh;
        u8 bmi_disp_tsh;
        u8 qmi_enq_disp_tsh;
        u8 qmi_deq_disp_tsh;
        u8 fm_ctl1_disp_tsh;
        u8 fm_ctl2_disp_tsh;
        int dma_cache_override;
        enum fman_dma_aid_mode dma_aid_mode;
        u32 dma_axi_dbg_num_of_beats;
        u32 dma_cam_num_of_entries;
        u32 dma_watchdog;
        u8 dma_comm_qtsh_asrt_emer;
        u32 dma_write_buf_tsh_asrt_emer;
        u32 dma_read_buf_tsh_asrt_emer;
        u8 dma_comm_qtsh_clr_emer;
        u32 dma_write_buf_tsh_clr_emer;
        u32 dma_read_buf_tsh_clr_emer;
        u32 dma_sos_emergency;
        int dma_dbg_cnt_mode;
        int catastrophic_err;
        int dma_err;
        u32 exceptions;
        u16 clk_freq;
        u32 cam_base_addr;
        u32 fifo_base_addr;
        u32 total_fifo_size;
        u32 total_num_of_tasks;
        u32 qmi_def_tnums_thresh;
};

/* Structure that holds information received from device tree */
struct fman_dts_params {
        void __iomem *base_addr;        /* FMan virtual address */
        struct resource *res;           /* FMan memory resource */
        u8 id;                          /* FMan ID */

        int err_irq;                    /* FMan Error IRQ */

        u16 clk_freq;                   /* FMan clock freq (In Mhz) */

        u32 qman_channel_base;          /* QMan channels base */
        u32 num_of_qman_channels;       /* Number of QMan channels */

        struct resource muram_res;      /* MURAM resource */
};

/** fman_exceptions_cb
 * fman      - Pointer to FMan
 * exception - The exception.
 *
 * Exceptions user callback routine, will be called upon an exception
 * passing the exception identification.
 *
 * Return: irq status
 */
typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
                                         enum fman_exceptions exception);

/** fman_bus_error_cb
 * fman    - Pointer to FMan
 * port_id - Port id
 * addr    - Address that caused the error
 * tnum    - Owner of error
 * liodn   - Logical IO device number
 *
 * Bus error user callback routine, will be called upon bus error,
 * passing parameters describing the errors and the owner.
 *
 * Return: IRQ status
 */
typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
                                        u64 addr, u8 tnum, u16 liodn);

struct fman {
        struct device *dev;
        void __iomem *base_addr;
        struct fman_intr_src intr_mng[FMAN_EV_CNT];

        struct fman_fpm_regs __iomem *fpm_regs;
        struct fman_bmi_regs __iomem *bmi_regs;
        struct fman_qmi_regs __iomem *qmi_regs;
        struct fman_dma_regs __iomem *dma_regs;
        fman_exceptions_cb *exception_cb;
        fman_bus_error_cb *bus_error_cb;
        /* Spinlock for FMan use */
        spinlock_t spinlock;
        struct fman_state_struct *state;

        struct fman_cfg *cfg;
        struct muram_info *muram;
        /* cam section in muram */
        int cam_offset;
        size_t cam_size;
        /* Fifo in MURAM */
        int fifo_offset;
        size_t fifo_size;

        u32 liodn_base[64];
        u32 liodn_offset[64];

        struct fman_dts_params dts_params;
};

static irqreturn_t fman_exceptions(struct fman *fman,
                                   enum fman_exceptions exception)
{
        dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
                __func__, fman->state->fm_id, exception);

        return IRQ_HANDLED;
}

static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
                                  u64 __maybe_unused addr,
                                  u8 __maybe_unused tnum,
                                  u16 __maybe_unused liodn)
{
        dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
                __func__, fman->state->fm_id, port_id);

        return IRQ_HANDLED;
}

static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
{
        if (fman->intr_mng[id].isr_cb) {
                fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
{
        u8 sw_port_id = 0;

        if (hw_port_id >= BASE_TX_PORTID)
                sw_port_id = hw_port_id - BASE_TX_PORTID;
        else if (hw_port_id >= BASE_RX_PORTID)
                sw_port_id = hw_port_id - BASE_RX_PORTID;
        else
                sw_port_id = 0;

        return sw_port_id;
}

static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
                                       u8 port_id)
{
        u32 tmp = 0;

        tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;

        tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;

        /* order restoration */
        if (port_id % 2)
                tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
        else
                tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;

        iowrite32be(tmp, &fpm_rg->fmfp_prc);
}

static void set_port_liodn(struct fman *fman, u8 port_id,
                           u32 liodn_base, u32 liodn_ofst)
{
        u32 tmp;

        /* set LIODN base for this port */
        tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
        if (port_id % 2) {
                tmp &= ~DMA_LIODN_BASE_MASK;
                tmp |= liodn_base;
        } else {
                tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
                tmp |= liodn_base << DMA_LIODN_SHIFT;
        }
        iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
        iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
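
/* Illustrative notes on the two helpers above (not part of the original
 * flow): hardware port ids start at BASE_RX_PORTID (0x08) for Rx and
 * BASE_TX_PORTID (0x28) for Tx, so hw_port_id_to_sw_port_id() maps
 * 0x08 -> 0 and 0x28 -> 0 within their respective ranges. Each fmdmplr
 * register packs two 12-bit LIODN bases: an even port_id lands in bits
 * 16-27 (DMA_LIODN_SHIFT), an odd one in bits 0-11
 * (DMA_LIODN_BASE_MASK).
 */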

static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
{
        u32 tmp;

        tmp = ioread32be(&fpm_rg->fm_rcr);
        if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
                iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
        else
                iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
                            FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
}

static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
{
        u32 tmp;

        tmp = ioread32be(&fpm_rg->fm_rcr);
        if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
                iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
        else
                iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
                            &fpm_rg->fm_rcr);
}

static void fman_defconfig(struct fman_cfg *cfg)
{
        memset(cfg, 0, sizeof(struct fman_cfg));

        cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
        cfg->dma_err = DEFAULT_DMA_ERR;
        cfg->dma_aid_mode = DEFAULT_AID_MODE;
        cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
        cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
        cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
        cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
        cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
        cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
        cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
        cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
        cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
        cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
        cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
        cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
        cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
        cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
        cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
        cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
}

static int dma_init(struct fman *fman)
{
        struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
        struct fman_cfg *cfg = fman->cfg;
        u32 tmp_reg;

        /* Init DMA Registers */

        /* clear status reg events */
        tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
                   DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
        iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);

        /* configure mode register */
        tmp_reg = 0;
        tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
        if (cfg->exceptions & EX_DMA_BUS_ERROR)
                tmp_reg |= DMA_MODE_BER;
        if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
            (cfg->exceptions & EX_DMA_READ_ECC) |
            (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
                tmp_reg |= DMA_MODE_ECC;
        if (cfg->dma_axi_dbg_num_of_beats)
                tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
                            ((cfg->dma_axi_dbg_num_of_beats - 1)
                             << DMA_MODE_AXI_DBG_SHIFT));

        tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
                    DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
        tmp_reg |= DMA_MODE_SECURE_PROT;
        tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
        tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;

        iowrite32be(tmp_reg, &dma_rg->fmdmmr);

        /* configure thresholds register */
        tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
                   DMA_THRESH_COMMQ_SHIFT);
        tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
                    DMA_THRESH_READ_INT_BUF_MASK) <<
                   DMA_THRESH_READ_INT_BUF_SHIFT;
        tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
                   DMA_THRESH_WRITE_INT_BUF_MASK;

        iowrite32be(tmp_reg, &dma_rg->fmdmtr);

        /* configure hysteresis register */
        tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
                   DMA_THRESH_COMMQ_SHIFT);
        tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
                    DMA_THRESH_READ_INT_BUF_MASK) <<
                   DMA_THRESH_READ_INT_BUF_SHIFT;
        tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
                   DMA_THRESH_WRITE_INT_BUF_MASK;

        iowrite32be(tmp_reg, &dma_rg->fmdmhy);

        /* configure emergency threshold */
        iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);

        /* configure Watchdog */
        iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);

        iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);

        /* Allocate MURAM for CAM */
        fman->cam_size =
                (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
        fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
        if (IS_ERR_VALUE(fman->cam_offset)) {
                dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
                        __func__);
                return -ENOMEM;
        }

        if (fman->state->rev_info.major == 2) {
                u32 __iomem *cam_base_addr;

                fman_muram_free_mem(fman->muram, fman->cam_offset,
                                    fman->cam_size);

                fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
                fman->cam_offset = fman_muram_alloc(fman->muram,
                                                    fman->cam_size);
                if (IS_ERR_VALUE(fman->cam_offset)) {
                        dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
                                __func__);
                        return -ENOMEM;
                }

                if (fman->cfg->dma_cam_num_of_entries % 8 ||
                    fman->cfg->dma_cam_num_of_entries > 32) {
                        dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
                                __func__);
                        return -EINVAL;
                }

                cam_base_addr = (u32 __iomem *)
                        fman_muram_offset_to_vbase(fman->muram,
                                                   fman->cam_offset);
                iowrite32be(~((1 <<
                              (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
                            cam_base_addr);
        }

        fman->cfg->cam_base_addr = fman->cam_offset;

        return 0;
}
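
/* CAM enable-mask sketch for the rev 2 path above (illustrative only):
 * ~((1 << (32 - n)) - 1) sets the top n bits of the word, so
 * n = 8 entries yields 0xFF000000 and n = 32 yields 0xFFFFFFFF.
 */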

static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
{
        u32 tmp_reg;
        int i;

        /* Init FPM Registers */

        tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
        iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);

        tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
                   ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
                   ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
                   ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
        iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);

        tmp_reg =
                (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
                 ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
                 ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
                 ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
        iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);

        /* define exceptions and error behavior */
        tmp_reg = 0;
        /* Clear events */
        tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
                    FPM_EV_MASK_SINGLE_ECC);
        /* enable interrupts */
        if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
                tmp_reg |= FPM_EV_MASK_STALL_EN;
        if (cfg->exceptions & EX_FPM_SINGLE_ECC)
                tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
        if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
                tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
        tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
        tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
        /* FMan is not halted upon external halt activation */
        tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
        /* FMan is not halted upon an unrecoverable ECC error */
        tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
        iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);

        /* clear all fmCtls event registers */
        for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
                iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);

        /* RAM ECC - enable and clear events */
        /* first we need to clear all parser memory,
         * as it is uninitialized and may cause ECC errors
         */
        /* event bits */
        tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);

        iowrite32be(tmp_reg, &fpm_rg->fm_rcr);

        tmp_reg = 0;
        if (cfg->exceptions & EX_IRAM_ECC) {
                tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
                enable_rams_ecc(fpm_rg);
        }
        if (cfg->exceptions & EX_MURAM_ECC) {
                tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
                enable_rams_ecc(fpm_rg);
        }
        iowrite32be(tmp_reg, &fpm_rg->fm_rie);
}

static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
                     struct fman_cfg *cfg)
{
        u32 tmp_reg;

        /* Init BMI Registers */

        /* define common resources */
        tmp_reg = cfg->fifo_base_addr;
        tmp_reg = tmp_reg / BMI_FIFO_ALIGN;

        tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
                    BMI_CFG1_FIFO_SIZE_SHIFT);
        iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);

        tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
                  BMI_CFG2_TASKS_SHIFT;
        /* number of DMAs will be dynamically updated when each port is set */
        iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);

        /* define unmaskable exceptions, enable and clear events */
        tmp_reg = 0;
        iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
                    BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
                    BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
                    BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);

        if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
                tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
        if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
                tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
        if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
                tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
        if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
                tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
        iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
}

static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
                     struct fman_cfg *cfg)
{
        u32 tmp_reg;

        /* Init QMI Registers */

        /* Clear error interrupt events */

        iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
                    &qmi_rg->fmqm_eie);
        tmp_reg = 0;
        if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
                tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
        if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
                tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
        /* enable events */
        iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);

        tmp_reg = 0;
        /* Clear interrupt events */
        iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
        if (cfg->exceptions & EX_QMI_SINGLE_ECC)
                tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
        /* enable events */
        iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}

static int enable(struct fman *fman, struct fman_cfg *cfg)
{
        u32 cfg_reg = 0;

        /* Enable all modules */

        /* clear & enable global counters - calculate reg and save for later,
         * because it's the same reg for QMI enable
         */
        cfg_reg = QMI_CFG_EN_COUNTERS;

        /* Set enqueue and dequeue thresholds */
        cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;

        iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
        iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
                    &fman->qmi_regs->fmqm_gc);

        return 0;
}
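
/* Register composition sketch for enable() (illustrative): on FManV3
 * qmi_def_tnums_thresh is 32 (see fill_soc_specific_params() below), so
 * fmqm_gc is written with QMI_CFG_EN_COUNTERS | QMI_CFG_ENQ_EN |
 * QMI_CFG_DEQ_EN | (32 << 8) | 32, i.e. enqueue and dequeue thresholds
 * of 32 TNUMs each (the << 8 matches QMI_CFG_ENQ_SHIFT).
 */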

static int set_exception(struct fman *fman,
                         enum fman_exceptions exception, bool enable)
{
        u32 tmp;

        switch (exception) {
        case FMAN_EX_DMA_BUS_ERROR:
                tmp = ioread32be(&fman->dma_regs->fmdmmr);
                if (enable)
                        tmp |= DMA_MODE_BER;
                else
                        tmp &= ~DMA_MODE_BER;
                /* disable bus error */
                iowrite32be(tmp, &fman->dma_regs->fmdmmr);
                break;
        case FMAN_EX_DMA_READ_ECC:
        case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
        case FMAN_EX_DMA_FM_WRITE_ECC:
                tmp = ioread32be(&fman->dma_regs->fmdmmr);
                if (enable)
                        tmp |= DMA_MODE_ECC;
                else
                        tmp &= ~DMA_MODE_ECC;
                iowrite32be(tmp, &fman->dma_regs->fmdmmr);
                break;
        case FMAN_EX_FPM_STALL_ON_TASKS:
                tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
                if (enable)
                        tmp |= FPM_EV_MASK_STALL_EN;
                else
                        tmp &= ~FPM_EV_MASK_STALL_EN;
                iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
                break;
        case FMAN_EX_FPM_SINGLE_ECC:
                tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
                if (enable)
                        tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
                else
                        tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
                iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
                break;
        case FMAN_EX_FPM_DOUBLE_ECC:
                tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
                if (enable)
                        tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
                else
                        tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
                iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
                break;
        case FMAN_EX_QMI_SINGLE_ECC:
                tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
                if (enable)
                        tmp |= QMI_INTR_EN_SINGLE_ECC;
                else
                        tmp &= ~QMI_INTR_EN_SINGLE_ECC;
                iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
                break;
        case FMAN_EX_QMI_DOUBLE_ECC:
                tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
                if (enable)
                        tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
                else
                        tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
                iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
                break;
        case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
                tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
                if (enable)
                        tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
                else
                        tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
                iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
                break;
        case FMAN_EX_BMI_LIST_RAM_ECC:
                tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
                if (enable)
                        tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
                else
                        tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
                iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
                break;
        case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
                tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
                if (enable)
                        tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
                else
                        tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
                iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
                break;
        case FMAN_EX_BMI_STATISTICS_RAM_ECC:
                tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
                if (enable)
                        tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
                else
                        tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
                iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
                break;
        case FMAN_EX_BMI_DISPATCH_RAM_ECC:
                tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
                if (enable)
                        tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
                else
                        tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
                iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
                break;
        case FMAN_EX_IRAM_ECC:
                tmp = ioread32be(&fman->fpm_regs->fm_rie);
                if (enable) {
                        /* enable ECC if not enabled */
                        enable_rams_ecc(fman->fpm_regs);
                        /* enable ECC interrupts */
                        tmp |= FPM_IRAM_ECC_ERR_EX_EN;
                } else {
                        /* ECC mechanism may be disabled,
                         * depending on driver status
                         */
                        disable_rams_ecc(fman->fpm_regs);
                        tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
                }
                iowrite32be(tmp, &fman->fpm_regs->fm_rie);
                break;
        case FMAN_EX_MURAM_ECC:
                tmp = ioread32be(&fman->fpm_regs->fm_rie);
                if (enable) {
                        /* enable ECC if not enabled */
                        enable_rams_ecc(fman->fpm_regs);
                        /* enable ECC interrupts */
                        tmp |= FPM_MURAM_ECC_ERR_EX_EN;
                } else {
                        /* ECC mechanism may be disabled,
                         * depending on driver status
                         */
                        disable_rams_ecc(fman->fpm_regs);
                        tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
                }
                iowrite32be(tmp, &fman->fpm_regs->fm_rie);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void resume(struct fman_fpm_regs __iomem *fpm_rg)
{
        u32 tmp;

        tmp = ioread32be(&fpm_rg->fmfp_ee);
        /* clear tmp_reg event bits in order not to clear standing events */
        tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
                 FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
        tmp |= FPM_EV_MASK_RELEASE_FM;

        iowrite32be(tmp, &fpm_rg->fmfp_ee);
}

static int fill_soc_specific_params(struct fman_state_struct *state)
{
        u8 minor = state->rev_info.minor;

        /* P4080 - Major 2
         * P2041/P3041/P5020/P5040 - Major 3
         * Tx/Bx - Major 6
         */
        switch (state->rev_info.major) {
        case 3:
                state->bmi_max_fifo_size = 160 * 1024;
                state->fm_iram_size = 64 * 1024;
                state->dma_thresh_max_commq = 31;
                state->dma_thresh_max_buf = 127;
                state->qmi_max_num_of_tnums = 64;
                state->qmi_def_tnums_thresh = 48;
                state->bmi_max_num_of_tasks = 128;
                state->max_num_of_open_dmas = 32;
                state->fm_port_num_of_cg = 256;
                state->num_of_rx_ports = 6;
                state->total_fifo_size = 122 * 1024;
                break;

        case 2:
                state->bmi_max_fifo_size = 160 * 1024;
                state->fm_iram_size = 64 * 1024;
                state->dma_thresh_max_commq = 31;
                state->dma_thresh_max_buf = 127;
                state->qmi_max_num_of_tnums = 64;
                state->qmi_def_tnums_thresh = 48;
                state->bmi_max_num_of_tasks = 128;
                state->max_num_of_open_dmas = 32;
                state->fm_port_num_of_cg = 256;
                state->num_of_rx_ports = 5;
                state->total_fifo_size = 100 * 1024;
                break;

        case 6:
                state->dma_thresh_max_commq = 83;
                state->dma_thresh_max_buf = 127;
                state->qmi_max_num_of_tnums = 64;
                state->qmi_def_tnums_thresh = 32;
                state->fm_port_num_of_cg = 256;

                /* FManV3L */
                if (minor == 1 || minor == 4) {
                        state->bmi_max_fifo_size = 192 * 1024;
                        state->bmi_max_num_of_tasks = 64;
                        state->max_num_of_open_dmas = 32;
                        state->num_of_rx_ports = 5;
                        if (minor == 1)
                                state->fm_iram_size = 32 * 1024;
                        else
                                state->fm_iram_size = 64 * 1024;
                        state->total_fifo_size = 156 * 1024;
                }
                /* FManV3H */
                else if (minor == 0 || minor == 2 || minor == 3) {
                        state->bmi_max_fifo_size = 384 * 1024;
                        state->fm_iram_size = 64 * 1024;
                        state->bmi_max_num_of_tasks = 128;
                        state->max_num_of_open_dmas = 84;
                        state->num_of_rx_ports = 8;
                        state->total_fifo_size = 295 * 1024;
                } else {
                        pr_err("Unsupported FManv3 version\n");
                        return -EINVAL;
                }

                break;
        default:
                pr_err("Unsupported FMan version\n");
                return -EINVAL;
        }

        return 0;
}

static bool is_init_done(struct fman_cfg *cfg)
{
        /* Checks if FMan driver parameters were initialized:
         * the config structure is freed once initialization completes,
         * so a NULL cfg means init is done.
         */
        if (!cfg)
                return true;

        return false;
}

static void free_init_resources(struct fman *fman)
{
        if (fman->cam_offset)
                fman_muram_free_mem(fman->muram, fman->cam_offset,
                                    fman->cam_size);
        if (fman->fifo_offset)
                fman_muram_free_mem(fman->muram, fman->fifo_offset,
                                    fman->fifo_size);
}
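
/* The error-event handlers below share one pattern (a sketch of the
 * existing flow, not an addition to it): read the event register, AND
 * it with the enable mask so only enabled sources are serviced, clear
 * any bits that were merely forced through the force register, then
 * acknowledge by writing the remaining bits back to the event register
 * before dispatching the per-bit exception callbacks.
 */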

static irqreturn_t bmi_err_event(struct fman *fman)
{
        u32 event, mask, force;
        struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
        irqreturn_t ret = IRQ_NONE;

        event = ioread32be(&bmi_rg->fmbm_ievr);
        mask = ioread32be(&bmi_rg->fmbm_ier);
        event &= mask;
        /* clear the forced events */
        force = ioread32be(&bmi_rg->fmbm_ifr);
        if (force & event)
                iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
        /* clear the acknowledged events */
        iowrite32be(event, &bmi_rg->fmbm_ievr);

        if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
        if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
        if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
        if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);

        return ret;
}

static irqreturn_t qmi_err_event(struct fman *fman)
{
        u32 event, mask, force;
        struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
        irqreturn_t ret = IRQ_NONE;

        event = ioread32be(&qmi_rg->fmqm_eie);
        mask = ioread32be(&qmi_rg->fmqm_eien);
        event &= mask;

        /* clear the forced events */
        force = ioread32be(&qmi_rg->fmqm_eif);
        if (force & event)
                iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
        /* clear the acknowledged events */
        iowrite32be(event, &qmi_rg->fmqm_eie);

        if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
        if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
                ret = fman->exception_cb(fman,
                                         FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);

        return ret;
}

static irqreturn_t dma_err_event(struct fman *fman)
{
        u32 status, mask, com_id;
        u8 tnum, port_id, relative_port_id;
        u16 liodn;
        struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
        irqreturn_t ret = IRQ_NONE;

        status = ioread32be(&dma_rg->fmdmsr);
        mask = ioread32be(&dma_rg->fmdmmr);

        /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
        if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
                status &= ~DMA_STATUS_BUS_ERR;

        /* clear relevant bits if mask has no DMA_MODE_ECC */
        if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
                status &= ~(DMA_STATUS_FM_SPDAT_ECC |
                            DMA_STATUS_READ_ECC |
                            DMA_STATUS_SYSTEM_WRITE_ECC |
                            DMA_STATUS_FM_WRITE_ECC);

        /* clear set events */
        iowrite32be(status, &dma_rg->fmdmsr);

        if (status & DMA_STATUS_BUS_ERR) {
                u64 addr;

                addr = (u64)ioread32be(&dma_rg->fmdmtal);
                addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);

                com_id = ioread32be(&dma_rg->fmdmtcid);
                port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
                                DMA_TRANSFER_PORTID_SHIFT));
                relative_port_id =
                        hw_port_id_to_sw_port_id(fman->state->rev_info.major,
                                                 port_id);
                tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
                            DMA_TRANSFER_TNUM_SHIFT);
                liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
                ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
                                         liodn);
        }
        if (status & DMA_STATUS_FM_SPDAT_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
        if (status & DMA_STATUS_READ_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
        if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
        if (status & DMA_STATUS_FM_WRITE_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);

        return ret;
}

static irqreturn_t fpm_err_event(struct fman *fman)
{
        u32 event;
        struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
        irqreturn_t ret = IRQ_NONE;

        event = ioread32be(&fpm_rg->fmfp_ee);
        /* clear all occurred events */
        iowrite32be(event, &fpm_rg->fmfp_ee);

        if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
            (event & FPM_EV_MASK_DOUBLE_ECC_EN))
                ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
        if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
                ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
        if ((event & FPM_EV_MASK_SINGLE_ECC) &&
            (event & FPM_EV_MASK_SINGLE_ECC_EN))
                ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);

        return ret;
}

static irqreturn_t muram_err_intr(struct fman *fman)
{
        u32 event, mask;
        struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
        irqreturn_t ret = IRQ_NONE;

        event = ioread32be(&fpm_rg->fm_rcr);
        mask = ioread32be(&fpm_rg->fm_rie);

        /* clear MURAM event bit (do not clear IRAM event) */
        iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);

        if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
                ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);

        return ret;
}

static irqreturn_t qmi_event(struct fman *fman)
{
        u32 event, mask, force;
        struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
        irqreturn_t ret = IRQ_NONE;

        event = ioread32be(&qmi_rg->fmqm_ie);
        mask = ioread32be(&qmi_rg->fmqm_ien);
        event &= mask;
        /* clear the forced events */
        force = ioread32be(&qmi_rg->fmqm_if);
        if (force & event)
                iowrite32be(force & ~event, &qmi_rg->fmqm_if);
        /* clear the acknowledged events */
        iowrite32be(event, &qmi_rg->fmqm_ie);

        if (event & QMI_INTR_EN_SINGLE_ECC)
                ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);

        return ret;
}

static void enable_time_stamp(struct fman *fman)
{
        struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
        u16 fm_clk_freq = fman->state->fm_clk_freq;
        u32 tmp, intgr, ts_freq;
        u64 frac;

        ts_freq = (u32)(1 << fman->state->count1_micro_bit);
        /* configure timestamp so that bit 8 will count 1 microsecond
         * Find effective count rate at TIMESTAMP least significant bits:
         * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
         * Find frequency ratio between effective count rate and the clock:
         * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
         * 256/600 = 0.4266666...
         */

        intgr = ts_freq / fm_clk_freq;
        /* we multiply by 2^16 to keep the fraction of the division;
         * we do not divide back, since we write this value as a fraction -
         * see spec
         */

        frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
        /* we check the remainder of the division in order to round up
         * if it is not an integer
         */
        if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
                frac++;

        tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
        iowrite32be(tmp, &fpm_rg->fmfp_tsc2);

        /* enable timestamp with original clock */
        iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
        fman->state->enabled_time_stamp = true;
}
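
/* Illustrative numbers for enable_time_stamp(), assuming
 * count1_micro_bit = 8 (FM_TIMESTAMP_1_USEC_BIT) and a 600 MHz FM
 * clock: ts_freq = 2^8 = 256, intgr = 256 / 600 = 0, and
 * frac = ceil((256 << 16) / 600) = 27963, so fmfp_tsc2 is written
 * with 0x00006D3B.
 */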

static int clear_iram(struct fman *fman)
{
        struct fman_iram_regs __iomem *iram;
        int i, count;

        iram = fman->base_addr + IMEM_OFFSET;

        /* Enable the auto-increment */
        iowrite32be(IRAM_IADD_AIE, &iram->iadd);
        count = 100;
        do {
                udelay(1);
        } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
        if (count == 0)
                return -EBUSY;

        for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
                iowrite32be(0xffffffff, &iram->idata);

        iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
        count = 100;
        do {
                udelay(1);
        } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
        if (count == 0)
                return -EBUSY;

        return 0;
}

static u32 get_exception_flag(enum fman_exceptions exception)
{
        u32 bit_mask;

        switch (exception) {
        case FMAN_EX_DMA_BUS_ERROR:
                bit_mask = EX_DMA_BUS_ERROR;
                break;
        case FMAN_EX_DMA_SINGLE_PORT_ECC:
                bit_mask = EX_DMA_SINGLE_PORT_ECC;
                break;
        case FMAN_EX_DMA_READ_ECC:
                bit_mask = EX_DMA_READ_ECC;
                break;
        case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
                bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
                break;
        case FMAN_EX_DMA_FM_WRITE_ECC:
                bit_mask = EX_DMA_FM_WRITE_ECC;
                break;
        case FMAN_EX_FPM_STALL_ON_TASKS:
                bit_mask = EX_FPM_STALL_ON_TASKS;
                break;
        case FMAN_EX_FPM_SINGLE_ECC:
                bit_mask = EX_FPM_SINGLE_ECC;
                break;
        case FMAN_EX_FPM_DOUBLE_ECC:
                bit_mask = EX_FPM_DOUBLE_ECC;
                break;
        case FMAN_EX_QMI_SINGLE_ECC:
                bit_mask = EX_QMI_SINGLE_ECC;
                break;
        case FMAN_EX_QMI_DOUBLE_ECC:
                bit_mask = EX_QMI_DOUBLE_ECC;
                break;
        case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
                bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
                break;
        case FMAN_EX_BMI_LIST_RAM_ECC:
                bit_mask = EX_BMI_LIST_RAM_ECC;
                break;
        case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
                bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
                break;
        case FMAN_EX_BMI_STATISTICS_RAM_ECC:
                bit_mask = EX_BMI_STATISTICS_RAM_ECC;
                break;
        case FMAN_EX_BMI_DISPATCH_RAM_ECC:
                bit_mask = EX_BMI_DISPATCH_RAM_ECC;
                break;
        case FMAN_EX_MURAM_ECC:
                bit_mask = EX_MURAM_ECC;
                break;
        default:
                bit_mask = 0;
                break;
        }

        return bit_mask;
}

static int get_module_event(enum fman_event_modules module, u8 mod_id,
                            enum fman_intr_type intr_type)
{
        int event;

        switch (module) {
        case FMAN_MOD_MAC:
                if (intr_type == FMAN_INTR_TYPE_ERR)
                        event = FMAN_EV_ERR_MAC0 + mod_id;
                else
                        event = FMAN_EV_MAC0 + mod_id;
                break;
        case FMAN_MOD_FMAN_CTRL:
                if (intr_type == FMAN_INTR_TYPE_ERR)
                        event = FMAN_EV_CNT;
                else
                        event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
                break;
        case FMAN_MOD_DUMMY_LAST:
                event = FMAN_EV_CNT;
                break;
        default:
                event = FMAN_EV_CNT;
                break;
        }

        return event;
}

static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
                            u32 *extra_size_of_fifo)
{
        struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
        u32 fifo = *size_of_fifo;
        u32 extra_fifo = *extra_size_of_fifo;
        u32 tmp;

        /* if this is the first time a port requires extra_fifo_pool_size,
         * the total extra_fifo_pool_size must be initialized to 1 buffer per
         * port
         */
        if (extra_fifo && !fman->state->extra_fifo_pool_size)
                fman->state->extra_fifo_pool_size =
                        fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;

        fman->state->extra_fifo_pool_size =
                max(fman->state->extra_fifo_pool_size, extra_fifo);

        /* check that there is enough uncommitted FIFO size */
        if ((fman->state->accumulated_fifo_size + fifo) >
            (fman->state->total_fifo_size -
             fman->state->extra_fifo_pool_size)) {
                dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
                        __func__);
                return -EAGAIN;
        }

        /* Read, modify and write to HW */
        tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
              ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
               BMI_EXTRA_FIFO_SIZE_SHIFT);
        iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);

        /* update accumulated */
        fman->state->accumulated_fifo_size += fifo;

        return 0;
}
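
/* Sizing example for set_size_of_fifo() (illustrative, assuming
 * FMAN_BMI_FIFO_UNITS from fman.h is the 256-byte FIFO allocation
 * unit): a port requesting fifo = 0x2000 (8 KiB) gets
 * 0x2000 / 256 - 1 = 31 written into the size field of its fmbm_pfs
 * register, and the 8 KiB is charged against total_fifo_size
 * (e.g. 122 KiB on a rev-major-3 FMan, per fill_soc_specific_params()).
 */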
static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
			    u32 *extra_size_of_fifo)
{
	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
	u32 fifo = *size_of_fifo;
	u32 extra_fifo = *extra_size_of_fifo;
	u32 tmp;

	/* if this is the first time a port requires extra_fifo_pool_size,
	 * the total extra_fifo_pool_size must be initialized to 1 buffer per
	 * port
	 */
	if (extra_fifo && !fman->state->extra_fifo_pool_size)
		fman->state->extra_fifo_pool_size =
			fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;

	fman->state->extra_fifo_pool_size =
		max(fman->state->extra_fifo_pool_size, extra_fifo);

	/* check that there is enough uncommitted FIFO size */
	if ((fman->state->accumulated_fifo_size + fifo) >
	    (fman->state->total_fifo_size -
	     fman->state->extra_fifo_pool_size)) {
		dev_err(fman->dev, "%s: Requested FIFO size and extra size exceed total FIFO size.\n",
			__func__);
		return -EAGAIN;
	}

	/* Read, modify and write to HW */
	tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
	      ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
	       BMI_EXTRA_FIFO_SIZE_SHIFT);
	iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);

	/* update accumulated */
	fman->state->accumulated_fifo_size += fifo;

	return 0;
}

static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
			    u8 *num_of_extra_tasks)
{
	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
	u8 tasks = *num_of_tasks;
	u8 extra_tasks = *num_of_extra_tasks;
	u32 tmp;

	if (extra_tasks)
		fman->state->extra_tasks_pool_size =
			max(fman->state->extra_tasks_pool_size, extra_tasks);

	/* check that there are enough uncommitted tasks */
	if ((fman->state->accumulated_num_of_tasks + tasks) >
	    (fman->state->total_num_of_tasks -
	     fman->state->extra_tasks_pool_size)) {
		dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
			__func__, fman->state->fm_id);
		return -EAGAIN;
	}
	/* update accumulated */
	fman->state->accumulated_num_of_tasks += tasks;

	/* Write to HW */
	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
	      ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
	tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
		(u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);

	return 0;
}
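
/* set_size_of_fifo() and set_num_of_tasks() above (and
 * set_num_of_open_dmas() below) share one accounting pattern: a per-FMan
 * "extra" pool is sized to the largest extra request seen so far, and a
 * port's committed request is granted only while
 * accumulated + request <= total - extra_pool, so the uncommitted pool
 * is never handed out as a guaranteed resource.
 */
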
static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
				u8 *num_of_open_dmas,
				u8 *num_of_extra_open_dmas)
{
	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
	u8 open_dmas = *num_of_open_dmas;
	u8 extra_open_dmas = *num_of_extra_open_dmas;
	u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
	u32 tmp;

	if (!open_dmas) {
		/* Configuration according to values in the HW:
		 * read the current number of open DMAs
		 */
		tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
		current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
					 BMI_EXTRA_NUM_OF_DMAS_SHIFT);

		tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
		current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
				    BMI_NUM_OF_DMAS_SHIFT) + 1);

		/* This is the first configuration and the user did not
		 * specify a value (!open_dmas), so the reset values will be
		 * used and we just save them for resource management
		 */
		fman->state->extra_open_dmas_pool_size =
			(u8)max(fman->state->extra_open_dmas_pool_size,
				current_extra_val);
		fman->state->accumulated_num_of_open_dmas += current_val;
		*num_of_open_dmas = current_val;
		*num_of_extra_open_dmas = current_extra_val;
		return 0;
	}

	if (extra_open_dmas > current_extra_val)
		fman->state->extra_open_dmas_pool_size =
			(u8)max(fman->state->extra_open_dmas_pool_size,
				extra_open_dmas);

	if ((fman->state->rev_info.major < 6) &&
	    (fman->state->accumulated_num_of_open_dmas - current_val +
	     open_dmas > fman->state->max_num_of_open_dmas)) {
		dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
			__func__, fman->state->fm_id);
		return -EAGAIN;
	} else if ((fman->state->rev_info.major >= 6) &&
		   !((fman->state->rev_info.major == 6) &&
		     (fman->state->rev_info.minor == 0)) &&
		   (fman->state->accumulated_num_of_open_dmas -
		    current_val + open_dmas >
		    fman->state->dma_thresh_max_commq + 1)) {
		dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
			__func__, fman->state->fm_id,
			fman->state->dma_thresh_max_commq + 1);
		return -EAGAIN;
	}

	WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
	/* update accumulated */
	fman->state->accumulated_num_of_open_dmas -= current_val;
	fman->state->accumulated_num_of_open_dmas += open_dmas;

	if (fman->state->rev_info.major < 6)
		total_num_dmas =
			(u8)(fman->state->accumulated_num_of_open_dmas +
			     fman->state->extra_open_dmas_pool_size);

	/* calculate reg */
	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
	      ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
	tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
		     (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);

	/* update total num of DMAs with committed number of open DMAs,
	 * and max uncommitted pool.
	 */
	if (total_num_dmas) {
		tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
		tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
		iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
	}

	return 0;
}
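
/* Note on the open_dmas != 0 path above: the function returns early in
 * the !open_dmas branch, so current_val and current_extra_val keep
 * their initial value of 0 here. The "extra_open_dmas >
 * current_extra_val" test therefore effectively means
 * "extra_open_dmas != 0", and subtracting current_val from the
 * accumulated count is a no-op.
 */
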
static int fman_config(struct fman *fman)
{
	void __iomem *base_addr;
	int err;

	base_addr = fman->dts_params.base_addr;

	fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
	if (!fman->state)
		goto err_fm_state;

	/* Allocate the FM driver's parameters structure */
	fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
	if (!fman->cfg)
		goto err_fm_drv;

	/* Initialize MURAM block */
	fman->muram =
		fman_muram_init(fman->dts_params.muram_res.start,
				resource_size(&fman->dts_params.muram_res));
	if (!fman->muram)
		goto err_fm_soc_specific;

	/* Initialize FM parameters which will be kept by the driver */
	fman->state->fm_id = fman->dts_params.id;
	fman->state->fm_clk_freq = fman->dts_params.clk_freq;
	fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
	fman->state->num_of_qman_channels =
		fman->dts_params.num_of_qman_channels;
	fman->state->res = fman->dts_params.res;
	fman->exception_cb = fman_exceptions;
	fman->bus_error_cb = fman_bus_error;
	fman->fpm_regs = base_addr + FPM_OFFSET;
	fman->bmi_regs = base_addr + BMI_OFFSET;
	fman->qmi_regs = base_addr + QMI_OFFSET;
	fman->dma_regs = base_addr + DMA_OFFSET;
	fman->base_addr = base_addr;

	spin_lock_init(&fman->spinlock);
	fman_defconfig(fman->cfg);

	fman->state->extra_fifo_pool_size = 0;
	fman->state->exceptions = (EX_DMA_BUS_ERROR |
				   EX_DMA_READ_ECC |
				   EX_DMA_SYSTEM_WRITE_ECC |
				   EX_DMA_FM_WRITE_ECC |
				   EX_FPM_STALL_ON_TASKS |
				   EX_FPM_SINGLE_ECC |
				   EX_FPM_DOUBLE_ECC |
				   EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
				   EX_BMI_LIST_RAM_ECC |
				   EX_BMI_STORAGE_PROFILE_ECC |
				   EX_BMI_STATISTICS_RAM_ECC |
				   EX_MURAM_ECC |
				   EX_BMI_DISPATCH_RAM_ECC |
				   EX_QMI_DOUBLE_ECC |
				   EX_QMI_SINGLE_ECC);

	/* Read FMan revision for future use */
	fman_get_revision(fman, &fman->state->rev_info);

	err = fill_soc_specific_params(fman->state);
	if (err)
		goto err_fm_soc_specific;

	/* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
	if (fman->state->rev_info.major >= 6)
		fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;

	fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;

	fman->state->total_num_of_tasks =
		(u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
					    fman->state->rev_info.minor,
					    fman->state->bmi_max_num_of_tasks);

	if (fman->state->rev_info.major < 6) {
		fman->cfg->dma_comm_qtsh_clr_emer =
		(u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
					fman->state->dma_thresh_max_commq);

		fman->cfg->dma_comm_qtsh_asrt_emer =
		(u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
					 fman->state->dma_thresh_max_commq);

		fman->cfg->dma_cam_num_of_entries =
		DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);

		fman->cfg->dma_read_buf_tsh_clr_emer =
		DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);

		fman->cfg->dma_read_buf_tsh_asrt_emer =
		DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);

		fman->cfg->dma_write_buf_tsh_clr_emer =
		DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);

		fman->cfg->dma_write_buf_tsh_asrt_emer =
		DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);

		fman->cfg->dma_axi_dbg_num_of_beats =
		DFLT_AXI_DBG_NUM_OF_BEATS;
	}

	return 0;

err_fm_soc_specific:
	kfree(fman->cfg);
err_fm_drv:
	kfree(fman->state);
err_fm_state:
	kfree(fman);
	return -EINVAL;
}
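
/* The error labels above unwind in reverse order of allocation, in the
 * usual kernel goto-cleanup style: err_fm_soc_specific frees the cfg
 * structure, err_fm_drv frees the state, and err_fm_state frees the
 * fman object itself. A caller such as fman_probe() must therefore not
 * free fman again when fman_config() fails.
 */
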
static int fman_init(struct fman *fman)
{
	struct fman_cfg *cfg = NULL;
	int err = 0, i, count;

	if (is_init_done(fman->cfg))
		return -EINVAL;

	fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;

	cfg = fman->cfg;

	/* clear revision-dependent non-existing exceptions */
	if (fman->state->rev_info.major < 6)
		fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;

	if (fman->state->rev_info.major >= 6)
		fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;

	/* clear CGP */
	memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
		  fman->state->fm_port_num_of_cg);

	/* Save LIODN info before FMan reset
	 * Skipping non-existent port 0 (i = 1)
	 */
	for (i = 1; i < FMAN_LIODN_TBL; i++) {
		u32 liodn_base;

		fman->liodn_offset[i] =
			ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
		liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
		if (i % 2) {
			/* FMDM_PLR LSB holds LIODN base for odd ports */
			liodn_base &= DMA_LIODN_BASE_MASK;
		} else {
			/* FMDM_PLR MSB holds LIODN base for even ports */
			liodn_base >>= DMA_LIODN_SHIFT;
			liodn_base &= DMA_LIODN_BASE_MASK;
		}
		fman->liodn_base[i] = liodn_base;
	}

	/* FMan Reset (supported only for FMan V2) */
	if (fman->state->rev_info.major >= 6) {
		/* Errata A007273 */
		dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n",
			__func__);
	} else {
		iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
		/* Wait for reset completion */
		count = 100;
		do {
			udelay(1);
		} while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
			  FPM_RSTC_FM_RESET) && --count);
		if (count == 0)
			return -EBUSY;
	}

	if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
		resume(fman->fpm_regs);
		/* Wait until QMI leaves the halt-not-busy state */
		count = 100;
		do {
			udelay(1);
		} while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
			  QMI_GS_HALT_NOT_BUSY) && --count);
		if (count == 0)
			dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
				 __func__);
	}

	if (clear_iram(fman) != 0)
		return -EINVAL;

	cfg->exceptions = fman->state->exceptions;

	/* Init DMA Registers */
	err = dma_init(fman);
	if (err != 0) {
		free_init_resources(fman);
		return err;
	}

	/* Init FPM Registers */
	fpm_init(fman->fpm_regs, fman->cfg);

	/* define common resources */
	/* allocate MURAM for FIFO according to total size */
	fman->fifo_offset = fman_muram_alloc(fman->muram,
					     fman->state->total_fifo_size);
	if (IS_ERR_VALUE(fman->fifo_offset)) {
		free_init_resources(fman);
		dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
			__func__);
		return -ENOMEM;
	}

	cfg->fifo_base_addr = fman->fifo_offset;
	cfg->total_fifo_size = fman->state->total_fifo_size;
	cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
	cfg->clk_freq = fman->state->fm_clk_freq;

	/* Init BMI Registers */
	bmi_init(fman->bmi_regs, fman->cfg);

	/* Init QMI Registers */
	qmi_init(fman->qmi_regs, fman->cfg);

	err = enable(fman, cfg);
	if (err != 0)
		return err;

	enable_time_stamp(fman);

	kfree(fman->cfg);
	fman->cfg = NULL;

	return 0;
}
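
/* fman_init() above brings the hardware up in a fixed order: save the
 * LIODN state, reset the FMan (V2 only; the V3 reset is avoided per
 * errata A007273), resume the QMI if it is halted, clear the IRAM, then
 * program the DMA, FPM, BMI and QMI blocks before enabling the FMan and
 * its timestamp. Once enabled, the temporary cfg structure is freed and
 * NULLed, so is_init_done() starts reporting true for the IRQ handlers
 * below.
 */
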
static int fman_set_exception(struct fman *fman,
			      enum fman_exceptions exception, bool enable)
{
	u32 bit_mask = 0;

	if (!is_init_done(fman->cfg))
		return -EINVAL;

	bit_mask = get_exception_flag(exception);
	if (bit_mask) {
		if (enable)
			fman->state->exceptions |= bit_mask;
		else
			fman->state->exceptions &= ~bit_mask;
	} else {
		dev_err(fman->dev, "%s: Undefined exception (%d)\n",
			__func__, exception);
		return -EINVAL;
	}

	return set_exception(fman, exception, enable);
}

/**
 * fman_register_intr
 * @fman: A Pointer to FMan device
 * @module: Calling module
 * @mod_id: Module id (if more than 1 exists, '0' if not)
 * @intr_type: Interrupt type (error/normal) selection.
 * @isr_cb: The interrupt service routine.
 * @src_arg: Argument to be passed to isr_cb.
 *
 * Used to register an event handler to be processed by FMan
 */
void fman_register_intr(struct fman *fman, enum fman_event_modules module,
			u8 mod_id, enum fman_intr_type intr_type,
			void (*isr_cb)(void *src_arg), void *src_arg)
{
	int event = 0;

	event = get_module_event(module, mod_id, intr_type);
	WARN_ON(event >= FMAN_EV_CNT);

	/* register in local FM structure */
	fman->intr_mng[event].isr_cb = isr_cb;
	fman->intr_mng[event].src_handle = src_arg;
}

/**
 * fman_unregister_intr
 * @fman: A Pointer to FMan device
 * @module: Calling module
 * @mod_id: Module id (if more than 1 exists, '0' if not)
 * @intr_type: Interrupt type (error/normal) selection.
 *
 * Used to unregister an event handler from the FMan
 */
void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
			  u8 mod_id, enum fman_intr_type intr_type)
{
	int event = 0;

	event = get_module_event(module, mod_id, intr_type);
	WARN_ON(event >= FMAN_EV_CNT);

	fman->intr_mng[event].isr_cb = NULL;
	fman->intr_mng[event].src_handle = NULL;
}

/**
 * fman_set_port_params
 * @fman: A Pointer to FMan device
 * @port_params: Port parameters
 *
 * Used by FMan Port to pass parameters to the FMan
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_port_params(struct fman *fman,
			 struct fman_port_init_params *port_params)
{
	int err;
	unsigned long flags;
	u8 port_id = port_params->port_id, mac_id;

	spin_lock_irqsave(&fman->spinlock, flags);

	err = set_num_of_tasks(fman, port_params->port_id,
			       &port_params->num_of_tasks,
			       &port_params->num_of_extra_tasks);
	if (err)
		goto return_err;

	/* TX Ports */
	if (port_params->port_type != FMAN_PORT_TYPE_RX) {
		u32 enq_th, deq_th, reg;

		/* update qmi ENQ/DEQ threshold */
		fman->state->accumulated_num_of_deq_tnums +=
			port_params->deq_pipeline_depth;
		enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
			  QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
		/* if enq_th is too big, reduce it to the largest value
		 * that still leaves room for the committed dequeue TNUMs
		 */
		if (enq_th >= (fman->state->qmi_max_num_of_tnums -
		    fman->state->accumulated_num_of_deq_tnums)) {
			enq_th =
			fman->state->qmi_max_num_of_tnums -
			fman->state->accumulated_num_of_deq_tnums - 1;

			reg = ioread32be(&fman->qmi_regs->fmqm_gc);
			reg &= ~QMI_CFG_ENQ_MASK;
			reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
			iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
		}

		deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
			 QMI_CFG_DEQ_MASK;
		/* if deq_th is too small, enlarge it to the smallest value
		 * that covers the committed dequeue TNUMs.
		 * deq_th may not be larger than 63
		 * (fman->state->qmi_max_num_of_tnums - 1).
		 */
		if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
		    (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
			deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
			reg = ioread32be(&fman->qmi_regs->fmqm_gc);
			reg &= ~QMI_CFG_DEQ_MASK;
			reg |= deq_th;
			iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
		}
	}

	err = set_size_of_fifo(fman, port_params->port_id,
			       &port_params->size_of_fifo,
			       &port_params->extra_size_of_fifo);
	if (err)
		goto return_err;

	err = set_num_of_open_dmas(fman, port_params->port_id,
				   &port_params->num_of_open_dmas,
				   &port_params->num_of_extra_open_dmas);
	if (err)
		goto return_err;

	set_port_liodn(fman, port_id, fman->liodn_base[port_id],
		       fman->liodn_offset[port_id]);

	if (fman->state->rev_info.major < 6)
		set_port_order_restoration(fman->fpm_regs, port_id);

	mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);

	if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
		fman->state->port_mfl[mac_id] = port_params->max_frame_length;
	} else {
		dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
			 __func__, port_id, mac_id);
		err = -EINVAL;
		goto return_err;
	}

	spin_unlock_irqrestore(&fman->spinlock, flags);

	return 0;

return_err:
	spin_unlock_irqrestore(&fman->spinlock, flags);
	return err;
}
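
/* Illustration of the QMI threshold adjustment above (hypothetical
 * numbers): with qmi_max_num_of_tnums = 64 and an accumulated dequeue
 * pipeline depth of 10, an enqueue threshold of 54 or more is rewritten
 * to 53, and a dequeue threshold of 10 or less is raised to 11, keeping
 * deq_th above the committed dequeue TNUMs and enq_th below the TNUMs
 * that remain available for enqueue.
 */
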
/**
 * fman_reset_mac
 * @fman: A Pointer to FMan device
 * @mac_id: MAC id to be reset
 *
 * Reset a specific MAC
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_reset_mac(struct fman *fman, u8 mac_id)
{
	struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
	u32 msk, timeout = 100;

	if (fman->state->rev_info.major >= 6) {
		dev_err(fman->dev, "%s: FMan MAC reset is not available for FMan V3!\n",
			__func__);
		return -EINVAL;
	}

	/* Get the relevant bit mask */
	switch (mac_id) {
	case 0:
		msk = FPM_RSTC_MAC0_RESET;
		break;
	case 1:
		msk = FPM_RSTC_MAC1_RESET;
		break;
	case 2:
		msk = FPM_RSTC_MAC2_RESET;
		break;
	case 3:
		msk = FPM_RSTC_MAC3_RESET;
		break;
	case 4:
		msk = FPM_RSTC_MAC4_RESET;
		break;
	case 5:
		msk = FPM_RSTC_MAC5_RESET;
		break;
	case 6:
		msk = FPM_RSTC_MAC6_RESET;
		break;
	case 7:
		msk = FPM_RSTC_MAC7_RESET;
		break;
	case 8:
		msk = FPM_RSTC_MAC8_RESET;
		break;
	case 9:
		msk = FPM_RSTC_MAC9_RESET;
		break;
	default:
		dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
			 __func__, mac_id);
		return -EINVAL;
	}

	/* reset */
	iowrite32be(msk, &fpm_rg->fm_rstc);
	while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
		udelay(10);

	if (!timeout)
		return -EIO;

	return 0;
}

/**
 * fman_set_mac_max_frame
 * @fman: A Pointer to FMan device
 * @mac_id: MAC id
 * @mfl: Maximum frame length
 *
 * Set maximum frame length of specific MAC in FMan driver
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
{
	/* if the port is already initialized, check that the new
	 * max_frame_length is smaller than or equal to the port's max
	 */
	if (!fman->state->port_mfl[mac_id] ||
	    mfl <= fman->state->port_mfl[mac_id]) {
		fman->state->mac_mfl[mac_id] = mfl;
	} else {
		dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
			 __func__);
		return -EINVAL;
	}
	return 0;
}
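
/* fman_set_mac_max_frame() above and fman_set_port_params() earlier
 * enforce the same invariant from both sides: a port must be configured
 * with max_frame_length >= its MAC's MFL, and once the port is
 * initialized the MAC's MFL may only be set to at most the port's
 * value.
 */
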
/**
 * fman_get_clock_freq
 * @fman: A Pointer to FMan device
 *
 * Get FMan clock frequency
 *
 * Return: FMan clock frequency
 */
u16 fman_get_clock_freq(struct fman *fman)
{
	return fman->state->fm_clk_freq;
}

/**
 * fman_get_bmi_max_fifo_size
 * @fman: A Pointer to FMan device
 *
 * Get FMan maximum FIFO size
 *
 * Return: FMan Maximum FIFO size
 */
u32 fman_get_bmi_max_fifo_size(struct fman *fman)
{
	return fman->state->bmi_max_fifo_size;
}

/**
 * fman_get_revision
 * @fman: A Pointer to the FMan module
 * @rev_info: A structure of revision information parameters.
 *
 * Returns the FM revision
 *
 * Allowed only after fman_config() has mapped the FPM registers.
 */
void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
{
	u32 tmp;

	tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
	rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
			       FPM_REV1_MAJOR_SHIFT);
	rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
}

/**
 * fman_get_qman_channel_id
 * @fman: A Pointer to FMan device
 * @port_id: Port id
 *
 * Get QMan channel ID associated to the Port id
 *
 * Return: QMan channel ID
 */
u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
{
	int i;

	if (fman->state->rev_info.major >= 6) {
		u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
				  0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
		for (i = 0; i < fman->state->num_of_qman_channels; i++) {
			if (port_ids[i] == port_id)
				break;
		}
	} else {
		u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
				  0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
		for (i = 0; i < fman->state->num_of_qman_channels; i++) {
			if (port_ids[i] == port_id)
				break;
		}
	}

	if (i == fman->state->num_of_qman_channels)
		return 0;

	return fman->state->qman_channel_base + i;
}
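
/* Example of the lookup above (indices follow the port_ids tables): on
 * FMan v3 (major >= 6), hardware port 0x28 sits at index 2, so its
 * channel is qman_channel_base + 2; a port id that is not in the table
 * falls off the end of the search and yields channel 0.
 */
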
/**
 * fman_get_mem_region
 * @fman: A Pointer to FMan device
 *
 * Get FMan memory region
 *
 * Return: A structure with FMan memory region information
 */
struct resource *fman_get_mem_region(struct fman *fman)
{
	return fman->state->res;
}

/* Bootargs defines */
/* Extra headroom for RX buffers - Default, min and max */
#define FSL_FM_RX_EXTRA_HEADROOM	64
#define FSL_FM_RX_EXTRA_HEADROOM_MIN	16
#define FSL_FM_RX_EXTRA_HEADROOM_MAX	384

/* Maximum frame length */
#define FSL_FM_MAX_FRAME_SIZE		1522
#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE	9600
#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE	64

/* Extra headroom for Rx buffers.
 * FMan is instructed to allocate, on the Rx path, this amount of
 * space at the beginning of a data buffer, beside the DPA private
 * data area and the IC fields.
 * Does not impact Tx buffer layout.
 * Configurable from bootargs. 64 by default, it's needed on
 * particular forwarding scenarios that add extra headers to the
 * forwarded frame.
 */
int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
module_param(fsl_fm_rx_extra_headroom, int, 0);
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");

/* Max frame size, across all interfaces.
 * Configurable from bootargs, to avoid allocating oversized (socket)
 * buffers when not using jumbo frames.
 * Must be large enough to accommodate the network MTU, but small enough
 * to avoid wasting skb memory.
 *
 * Could be overridden once, at boot-time, via the
 * fm_set_max_frm() callback.
 */
int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
module_param(fsl_fm_max_frm, int, 0);
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");

/**
 * fman_get_max_frm
 *
 * Return: Max frame length configured in the FM driver
 */
u16 fman_get_max_frm(void)
{
	static bool fm_check_mfl;

	if (!fm_check_mfl) {
		if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
		    fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
			pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
				fsl_fm_max_frm,
				FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
				FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
				FSL_FM_MAX_FRAME_SIZE);
			fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
		}
		fm_check_mfl = true;
	}

	return fsl_fm_max_frm;
}
EXPORT_SYMBOL(fman_get_max_frm);

/**
 * fman_get_rx_extra_headroom
 *
 * Return: Extra headroom size configured in the FM driver
 */
int fman_get_rx_extra_headroom(void)
{
	static bool fm_check_rx_extra_headroom;

	if (!fm_check_rx_extra_headroom) {
		if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
		    fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
			pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
				fsl_fm_rx_extra_headroom,
				FSL_FM_RX_EXTRA_HEADROOM_MIN,
				FSL_FM_RX_EXTRA_HEADROOM_MAX,
				FSL_FM_RX_EXTRA_HEADROOM);
			fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
		}

		fm_check_rx_extra_headroom = true;
		fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
	}

	return fsl_fm_rx_extra_headroom;
}
EXPORT_SYMBOL(fman_get_rx_extra_headroom);
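
/* Example bootargs for the two knobs above (illustrative values; the
 * exact parameter prefix comes from KBUILD_MODNAME, assumed here to be
 * fsl_fman when the driver is built in):
 *
 *	fsl_fman.fsl_fm_max_frm=9600 fsl_fman.fsl_fm_rx_extra_headroom=128
 *
 * fman_get_max_frm() and fman_get_rx_extra_headroom() validate the
 * values once, on first use, and fall back to the defaults if they are
 * out of range.
 */
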
/**
 * fman_bind
 * @fm_dev: FMan OF device pointer
 *
 * Bind to a specific FMan device.
 *
 * Allowed only after the port was created.
 *
 * Return: A pointer to the FMan device
 */
struct fman *fman_bind(struct device *fm_dev)
{
	return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
}

static irqreturn_t fman_err_irq(int irq, void *handle)
{
	struct fman *fman = (struct fman *)handle;
	u32 pending;
	struct fman_fpm_regs __iomem *fpm_rg;
	irqreturn_t single_ret, ret = IRQ_NONE;

	if (!is_init_done(fman->cfg))
		return IRQ_NONE;

	fpm_rg = fman->fpm_regs;

	/* error interrupts */
	pending = ioread32be(&fpm_rg->fm_epi);
	if (!pending)
		return IRQ_NONE;

	if (pending & ERR_INTR_EN_BMI) {
		single_ret = bmi_err_event(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_QMI) {
		single_ret = qmi_err_event(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_FPM) {
		single_ret = fpm_err_event(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_DMA) {
		single_ret = dma_err_event(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MURAM) {
		single_ret = muram_err_intr(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	/* MAC error interrupts */
	if (pending & ERR_INTR_EN_MAC0) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC1) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC2) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC3) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC4) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC5) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC6) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC7) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC8) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & ERR_INTR_EN_MAC9) {
		single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t fman_irq(int irq, void *handle)
{
	struct fman *fman = (struct fman *)handle;
	u32 pending;
	struct fman_fpm_regs __iomem *fpm_rg;
	irqreturn_t single_ret, ret = IRQ_NONE;

	if (!is_init_done(fman->cfg))
		return IRQ_NONE;

	fpm_rg = fman->fpm_regs;

	/* normal interrupts */
	pending = ioread32be(&fpm_rg->fm_npi);
	if (!pending)
		return IRQ_NONE;

	if (pending & INTR_EN_QMI) {
		single_ret = qmi_event(fman);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	/* MAC interrupts */
	if (pending & INTR_EN_MAC0) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC1) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC2) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC3) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC4) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC5) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC6) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC7) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC8) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	if (pending & INTR_EN_MAC9) {
		single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
		if (single_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static const struct of_device_id fman_muram_match[] = {
	{
		.compatible = "fsl,fman-muram"},
	{}
};
MODULE_DEVICE_TABLE(of, fman_muram_match);
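
/* Illustrative sketch of the device tree layout that read_dts_node()
 * below expects; names and values are examples only, the fsl,fman
 * binding documentation is authoritative:
 *
 *	fman0: fman@400000 {
 *		compatible = "fsl,fman";
 *		cell-index = <0>;
 *		reg = <0x400000 0x100000>;
 *		interrupts = <96 2 0 0>, <16 2 1 1>;
 *		clocks = <&clockgen 3 0>;
 *		fsl,qman-channel-range = <0x800 0x10>;
 *
 *		muram@0 {
 *			compatible = "fsl,fman-muram";
 *			reg = <0x0 0x60000>;
 *		};
 *	};
 */
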
static struct fman *read_dts_node(struct platform_device *of_dev)
{
	struct fman *fman;
	struct device_node *fm_node, *muram_node;
	struct resource *res;
	const u32 *u32_prop;
	int lenp, err, irq;
	struct clk *clk;
	u32 clk_rate;
	phys_addr_t phys_base_addr;
	resource_size_t mem_size;

	fman = kzalloc(sizeof(*fman), GFP_KERNEL);
	if (!fman)
		return NULL;

	fm_node = of_node_get(of_dev->dev.of_node);

	u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
	if (!u32_prop) {
		dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
			__func__, fm_node->full_name);
		goto fman_node_put;
	}
	if (WARN_ON(lenp != sizeof(u32)))
		goto fman_node_put;

	fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);

	/* Get the FM interrupt */
	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
			__func__);
		goto fman_node_put;
	}
	irq = res->start;

	/* Get the FM error interrupt */
	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
	if (!res) {
		dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
			__func__);
		goto fman_node_put;
	}
	fman->dts_params.err_irq = res->start;

	/* Get the FM address */
	res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
			__func__);
		goto fman_node_put;
	}

	phys_base_addr = res->start;
	mem_size = resource_size(res);

	clk = of_clk_get(fm_node, 0);
	if (IS_ERR(clk)) {
		dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
			__func__, fman->dts_params.id);
		goto fman_node_put;
	}

	clk_rate = clk_get_rate(clk);
	if (!clk_rate) {
		dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
			__func__, fman->dts_params.id);
		goto fman_node_put;
	}
	/* Rounding to MHz */
	fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);

	u32_prop = (const u32 *)of_get_property(fm_node,
						"fsl,qman-channel-range",
						&lenp);
	if (!u32_prop) {
		dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
			__func__, fm_node->full_name);
		goto fman_node_put;
	}
	if (WARN_ON(lenp != sizeof(u32) * 2))
		goto fman_node_put;
	fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
	fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);

	/* Get the MURAM base address and size */
	muram_node = of_find_matching_node(fm_node, fman_muram_match);
	if (!muram_node) {
		dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
			__func__);
		goto fman_node_put;
	}

	err = of_address_to_resource(muram_node, 0,
				     &fman->dts_params.muram_res);
	if (err) {
		of_node_put(muram_node);
		dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
			__func__, err);
		goto fman_node_put;
	}

	of_node_put(muram_node);
	of_node_put(fm_node);

	err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
	if (err < 0) {
		dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
			__func__, irq, err);
		goto fman_free;
	}

	if (fman->dts_params.err_irq != 0) {
		err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
				       fman_err_irq, IRQF_SHARED,
				       "fman-err", fman);
		if (err < 0) {
			dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
				__func__, fman->dts_params.err_irq, err);
			goto fman_free;
		}
	}

	fman->dts_params.res =
		devm_request_mem_region(&of_dev->dev, phys_base_addr,
					mem_size, "fman");
	if (!fman->dts_params.res) {
		dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
			__func__);
		goto fman_free;
	}

	fman->dts_params.base_addr =
		devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
	if (!fman->dts_params.base_addr) {
		dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
		goto fman_free;
	}

	return fman;

fman_node_put:
	of_node_put(fm_node);
fman_free:
	kfree(fman);
	return NULL;
}
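
/* Resource lifetime note for read_dts_node() above: the two IRQs, the
 * memory region and the ioremap are devm-managed, so they are released
 * automatically when the platform device goes away; only the fman
 * structure itself (and the device_node references on the error paths)
 * are released by hand.
 */
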
static int fman_probe(struct platform_device *of_dev)
{
	struct fman *fman;
	struct device *dev;
	int err;

	dev = &of_dev->dev;

	fman = read_dts_node(of_dev);
	if (!fman)
		return -EIO;

	err = fman_config(fman);
	if (err) {
		dev_err(dev, "%s: FMan config failed\n", __func__);
		return -EINVAL;
	}

	if (fman_init(fman) != 0) {
		dev_err(dev, "%s: FMan init failed\n", __func__);
		return -EINVAL;
	}

	if (fman->dts_params.err_irq == 0) {
		fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
		fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
		fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
		fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
		fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
		fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
		fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
		fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
		fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
		fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
		fman_set_exception(fman,
				   FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
		fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
		fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
				   false);
		fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
		fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
	}

	dev_set_drvdata(dev, fman);

	fman->dev = dev;

	dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);

	return 0;
}

static const struct of_device_id fman_match[] = {
	{
		.compatible = "fsl,fman"},
	{}
};

MODULE_DEVICE_TABLE(of, fman_match);

static struct platform_driver fman_driver = {
	.driver = {
		.name = "fsl-fman",
		.of_match_table = fman_match,
	},
	.probe = fman_probe,
};

builtin_platform_driver(fman_driver);