// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices Generic AXI ADC IP core
 * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
 *
 * Copyright 2012-2020 Analog Devices Inc.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <linux/fpga/adi-axi-common.h>

#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/*
 * Register definitions:
 *   https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */

#define ADI_AXI_REG_RSTN			0x0040
#define   ADI_AXI_REG_RSTN_CE_N			BIT(2)
#define   ADI_AXI_REG_RSTN_MMCM_RSTN		BIT(1)
#define   ADI_AXI_REG_RSTN_RSTN			BIT(0)

/* ADC Channel controls */

#define ADI_AXI_REG_CHAN_CTRL(c)		(0x0400 + (c) * 0x40)
#define   ADI_AXI_REG_CHAN_CTRL_LB_OWR		BIT(11)
#define   ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR	BIT(10)
#define   ADI_AXI_REG_CHAN_CTRL_IQCOR_EN	BIT(9)
#define   ADI_AXI_REG_CHAN_CTRL_DCFILT_EN	BIT(8)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_MASK	GENMASK(6, 4)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT	BIT(6)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_TYPE	BIT(5)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_EN		BIT(4)
#define   ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR	BIT(1)
#define   ADI_AXI_REG_CHAN_CTRL_ENABLE		BIT(0)

#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS		\
	(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT |	\
	 ADI_AXI_REG_CHAN_CTRL_FMT_EN |		\
	 ADI_AXI_REG_CHAN_CTRL_ENABLE)

struct adi_axi_adc_state {
	struct regmap *regmap;
	struct device *dev;
};

static int axi_adc_enable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	int ret;

	ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			      ADI_AXI_REG_RSTN_MMCM_RSTN);
	if (ret)
		return ret;

	fsleep(10000);
	return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			       ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}

static void axi_adc_disable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}

static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
				   const struct iio_backend_data_fmt *data)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	u32 val;

	if (!data->enable)
		return regmap_clear_bits(st->regmap,
					 ADI_AXI_REG_CHAN_CTRL(chan),
					 ADI_AXI_REG_CHAN_CTRL_FMT_EN);

	val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
	if (data->sign_extend)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
	if (data->type == IIO_BACKEND_OFFSET_BINARY)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);

	return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				  ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}

static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
			       ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				 ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

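/*
 * Buffer handling: sample data is moved by a DMA engine, so the backend
 * allocates a dmaengine IIO buffer and attaches it to the frontend device.
 * The DMA channel name defaults to "rx" unless the firmware node provides
 * a "dma-names" property.
 */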
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
						 struct iio_dev *indio_dev)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	struct iio_buffer *buffer;
	const char *dma_name;
	int ret;

	if (device_property_read_string(st->dev, "dma-names", &dma_name))
		dma_name = "rx";

	buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
	if (IS_ERR(buffer)) {
		dev_err(st->dev, "Could not get DMA buffer, %ld\n",
			PTR_ERR(buffer));
		return ERR_CAST(buffer);
	}

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret)
		return ERR_PTR(ret);

	return buffer;
}

static void axi_adc_free_buffer(struct iio_backend *back,
				struct iio_buffer *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

static const struct regmap_config axi_adc_regmap_config = {
	.val_bits = 32,
	.reg_bits = 32,
	.reg_stride = 4,
	.max_register = 0x0800,
};

static const struct iio_backend_ops adi_axi_adc_generic = {
	.enable = axi_adc_enable,
	.disable = axi_adc_disable,
	.data_format_set = axi_adc_data_format_set,
	.chan_enable = axi_adc_chan_enable,
	.chan_disable = axi_adc_chan_disable,
	.request_buffer = axi_adc_request_buffer,
	.free_buffer = axi_adc_free_buffer,
};

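/*
 * Probe flow: map the register space behind a regmap, enable the interface
 * clock, keep the core in reset (the frontend enables it later through the
 * backend ops) and verify that the reported IP core major version matches
 * the one expected for the compatible string before registering as an IIO
 * backend.
 */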
static int adi_axi_adc_probe(struct platform_device *pdev)
{
	const unsigned int *expected_ver;
	struct adi_axi_adc_state *st;
	void __iomem *base;
	unsigned int ver;
	struct clk *clk;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	st->dev = &pdev->dev;
	st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					   &axi_adc_regmap_config);
	if (IS_ERR(st->regmap))
		return PTR_ERR(st->regmap);

	expected_ver = device_get_match_data(&pdev->dev);
	if (!expected_ver)
		return -ENODEV;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * Force disable the core. Up to the frontend to enable us. And we can
	 * still read/write registers...
	 */
	ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
	if (ret)
		return ret;

	ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
	if (ret)
		return ret;

	if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
		dev_err(&pdev->dev,
			"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
			ADI_AXI_PCORE_VER_MINOR(*expected_ver),
			ADI_AXI_PCORE_VER_PATCH(*expected_ver),
			ADI_AXI_PCORE_VER_MAJOR(ver),
			ADI_AXI_PCORE_VER_MINOR(ver),
			ADI_AXI_PCORE_VER_PATCH(ver));
		return -ENODEV;
	}

	ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
		 ADI_AXI_PCORE_VER_MAJOR(ver),
		 ADI_AXI_PCORE_VER_MINOR(ver),
		 ADI_AXI_PCORE_VER_PATCH(ver));

	return 0;
}

static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');

/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
	{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
	{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);

static struct platform_driver adi_axi_adc_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = adi_axi_adc_of_match,
	},
	.probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
MODULE_IMPORT_NS(IIO_BACKEND);