/*
 * Register map access API - MMIO support
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct regmap_mmio_context {
	void __iomem *regs;
	unsigned reg_bytes;
	unsigned val_bytes;
	unsigned pad_bytes;
	struct clk *clk;
};

static inline void regmap_mmio_regsize_check(size_t reg_size)
{
	switch (reg_size) {
	case 1:
	case 2:
	case 4:
#ifdef CONFIG_64BIT
	case 8:
#endif
		break;
	default:
		BUG();
	}
}

static int regmap_mmio_regbits_check(size_t reg_bits)
{
	switch (reg_bits) {
	case 8:
	case 16:
	case 32:
#ifdef CONFIG_64BIT
	case 64:
#endif
		return 0;
	default:
		return -EINVAL;
	}
}

static inline void regmap_mmio_count_check(size_t count)
{
	BUG_ON(count % 2 != 0);
}

static int regmap_mmio_gather_write(void *context,
				    const void *reg, size_t reg_size,
				    const void *val, size_t val_size)
{
	struct regmap_mmio_context *ctx = context;
	u32 offset;
	int ret;

	regmap_mmio_regsize_check(reg_size);

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	offset = *(u32 *)reg;

	while (val_size) {
		switch (ctx->val_bytes) {
		case 1:
			writeb(*(u8 *)val, ctx->regs + offset);
			break;
		case 2:
			writew(*(u16 *)val, ctx->regs + offset);
			break;
		case 4:
			writel(*(u32 *)val, ctx->regs + offset);
			break;
#ifdef CONFIG_64BIT
		case 8:
			writeq(*(u64 *)val, ctx->regs + offset);
			break;
#endif
		default:
			/* Should be caught by regmap_mmio_check_config */
			BUG();
		}
		val_size -= ctx->val_bytes;
		val += ctx->val_bytes;
		offset += ctx->val_bytes;
	}

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}
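
/*
 * The regmap core hands regmap_mmio_write() a single formatted buffer:
 * the register address in the first reg_bytes, then pad_bytes of padding,
 * then the value data.  Split that buffer up and reuse the gather path
 * above for the actual MMIO accesses.
 */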
static int regmap_mmio_write(void *context, const void *data, size_t count)
{
	struct regmap_mmio_context *ctx = context;
	u32 offset = ctx->reg_bytes + ctx->pad_bytes;

	regmap_mmio_count_check(count);

	return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
					data + offset, count - offset);
}

static int regmap_mmio_read(void *context,
			    const void *reg, size_t reg_size,
			    void *val, size_t val_size)
{
	struct regmap_mmio_context *ctx = context;
	u32 offset;
	int ret;

	regmap_mmio_regsize_check(reg_size);

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	offset = *(u32 *)reg;

	while (val_size) {
		switch (ctx->val_bytes) {
		case 1:
			*(u8 *)val = readb(ctx->regs + offset);
			break;
		case 2:
			*(u16 *)val = readw(ctx->regs + offset);
			break;
		case 4:
			*(u32 *)val = readl(ctx->regs + offset);
			break;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)val = readq(ctx->regs + offset);
			break;
#endif
		default:
			/* Should be caught by regmap_mmio_check_config */
			BUG();
		}
		val_size -= ctx->val_bytes;
		val += ctx->val_bytes;
		offset += ctx->val_bytes;
	}

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}

static void regmap_mmio_free_context(void *context)
{
	struct regmap_mmio_context *ctx = context;

	if (!IS_ERR(ctx->clk)) {
		clk_unprepare(ctx->clk);
		clk_put(ctx->clk);
	}
	kfree(context);
}

static struct regmap_bus regmap_mmio = {
	.fast_io = true,
	.write = regmap_mmio_write,
	.gather_write = regmap_mmio_gather_write,
	.read = regmap_mmio_read,
	.free_context = regmap_mmio_free_context,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
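
/*
 * Validate the regmap_config and build the bus context.  Only whole-byte
 * register widths (8/16/32 and, on 64-bit kernels, 64 bits) are accepted,
 * pad bits are not supported, the register stride must cover the value
 * width, and register formatting must use native endianness.  If clk_id
 * is non-NULL, the named clock is looked up and prepared here, enabled
 * and disabled around each access, and released again in
 * regmap_mmio_free_context().
 */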
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
					const char *clk_id,
					void __iomem *regs,
					const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;
	int min_stride;
	int ret;

	ret = regmap_mmio_regbits_check(config->reg_bits);
	if (ret)
		return ERR_PTR(ret);

	if (config->pad_bits)
		return ERR_PTR(-EINVAL);

	switch (config->val_bits) {
	case 8:
		/* The core treats 0 as 1 */
		min_stride = 0;
		break;
	case 16:
		min_stride = 2;
		break;
	case 32:
		min_stride = 4;
		break;
#ifdef CONFIG_64BIT
	case 64:
		min_stride = 8;
		break;
#endif
	default:
		return ERR_PTR(-EINVAL);
	}

	if (config->reg_stride < min_stride)
		return ERR_PTR(-EINVAL);

	switch (config->reg_format_endian) {
	case REGMAP_ENDIAN_DEFAULT:
	case REGMAP_ENDIAN_NATIVE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->regs = regs;
	ctx->val_bytes = config->val_bits / 8;
	ctx->reg_bytes = config->reg_bits / 8;
	ctx->pad_bytes = config->pad_bits / 8;
	ctx->clk = ERR_PTR(-ENODEV);

	if (clk_id == NULL)
		return ctx;

	ctx->clk = clk_get(dev, clk_id);
	if (IS_ERR(ctx->clk)) {
		ret = PTR_ERR(ctx->clk);
		goto err_free;
	}

	ret = clk_prepare(ctx->clk);
	if (ret < 0) {
		clk_put(ctx->clk);
		goto err_free;
	}

	return ctx;

err_free:
	kfree(ctx);

	return ERR_PTR(ret);
}

/**
 * regmap_init_mmio_clk(): Initialise register map with register clock
 *
 * @dev: Device that will be interacted with
 * @clk_id: register clock consumer ID
 * @regs: Pointer to memory-mapped IO region
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer to
 * a struct regmap.
 */
struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
				    void __iomem *regs,
				    const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return regmap_init(dev, &regmap_mmio, ctx, config);
}
EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);

/**
 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
 *
 * @dev: Device that will be interacted with
 * @clk_id: register clock consumer ID
 * @regs: Pointer to memory-mapped IO region
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap. The regmap will be automatically freed by the
 * device management code.
 */
struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
					 void __iomem *regs,
					 const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return devm_regmap_init(dev, &regmap_mmio, ctx, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);

MODULE_LICENSE("GPL v2");
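
/*
 * Usage sketch (illustrative only; the "foo" identifiers, the "mclk" clock
 * consumer ID and the surrounding probe variables are hypothetical):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.reg_stride = 4,
 *	};
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 *	map = devm_regmap_init_mmio_clk(&pdev->dev, "mclk", base,
 *					&foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 * Passing a NULL clk_id skips the clock handling entirely.
 */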