#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/mcb.h>

#include "mcb-internal.h"

struct mcb_parse_priv {
	phys_addr_t mapbase;
	void __iomem *base;
};

#define for_each_chameleon_cell(dtype, p)		\
	for ((dtype) = get_next_dtype((p));		\
	     (dtype) != CHAMELEON_DTYPE_END;		\
	     (dtype) = get_next_dtype((p)))

static inline uint32_t get_next_dtype(void __iomem *p)
{
	uint32_t dtype;

	/* The descriptor type is stored in the upper four bits of the word */
	dtype = readl(p);
	return dtype >> 28;
}

static int chameleon_parse_bdd(struct mcb_bus *bus,
			struct chameleon_bar *cb,
			void __iomem *base)
{
	/* Bridge descriptors are not evaluated yet, only skipped */
	return 0;
}

static int chameleon_parse_gdd(struct mcb_bus *bus,
			struct chameleon_bar *cb,
			void __iomem *base, int bar_count)
{
	struct chameleon_gdd __iomem *gdd =
		(struct chameleon_gdd __iomem *)base;
	struct mcb_device *mdev;
	u32 dev_mapbase;
	u32 offset;
	u32 size;
	int ret;
	__le32 reg1;
	__le32 reg2;

	mdev = mcb_alloc_dev(bus);
	if (!mdev)
		return -ENOMEM;

	reg1 = readl(&gdd->reg1);
	reg2 = readl(&gdd->reg2);
	offset = readl(&gdd->offset);
	size = readl(&gdd->size);

	mdev->id = GDD_DEV(reg1);
	mdev->rev = GDD_REV(reg1);
	mdev->var = GDD_VAR(reg1);
	mdev->bar = GDD_BAR(reg2);
	mdev->group = GDD_GRP(reg2);
	mdev->inst = GDD_INS(reg2);

	/*
	 * If the BAR is missing, dev_mapbase is zero, or the device is
	 * IO mapped, just print a warning and go on with the next device
	 * instead of completely stopping the gdd parser.
	 */
	if (mdev->bar > bar_count - 1) {
		pr_info("No BAR for 16z%03d\n", mdev->id);
		ret = 0;
		goto err;
	}

	dev_mapbase = cb[mdev->bar].addr;
	if (!dev_mapbase) {
		pr_info("BAR not assigned for 16z%03d\n", mdev->id);
		ret = 0;
		goto err;
	}

	if (dev_mapbase & 0x01) {
		pr_info("IO mapped Device (16z%03d) not yet supported\n",
			mdev->id);
		ret = 0;
		goto err;
	}

	pr_debug("Found a 16z%03d\n", mdev->id);

	mdev->irq.start = GDD_IRQ(reg1);
	mdev->irq.end = GDD_IRQ(reg1);
	mdev->irq.flags = IORESOURCE_IRQ;

	mdev->mem.start = dev_mapbase + offset;
	mdev->mem.end = mdev->mem.start + size - 1;
	mdev->mem.flags = IORESOURCE_MEM;

	mdev->is_added = false;

	ret = mcb_device_register(bus, mdev);
	if (ret < 0)
		goto err;

	return 0;

err:
	mcb_free_dev(mdev);

	return ret;
}

static void chameleon_parse_bar(void __iomem *base,
				struct chameleon_bar *cb, int bar_count)
{
	char __iomem *p = base;
	int i;

	/* skip reg1 */
	p += sizeof(__le32);

	for (i = 0; i < bar_count; i++) {
		cb[i].addr = readl(p);
		cb[i].size = readl(p + 4);

		p += sizeof(struct chameleon_bar);
	}
}

static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
			     struct chameleon_bar **cb)
{
	struct chameleon_bar *c;
	int bar_count;
	__le32 reg;
	u32 dtype;

	/*
	 * For devices that are not connected to the PCI bus (e.g. LPC),
	 * a BAR descriptor is located directly after the chameleon
	 * header. This descriptor is comparable to a PCI header.
	 */
	dtype = get_next_dtype(*base);
	if (dtype == CHAMELEON_DTYPE_BAR) {
		reg = readl(*base);

		bar_count = BAR_CNT(reg);
		if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
			return -ENODEV;

		c = kcalloc(bar_count, sizeof(struct chameleon_bar),
			    GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		chameleon_parse_bar(*base, c, bar_count);
		*base += BAR_DESC_SIZE(bar_count);
	} else {
		c = kzalloc(sizeof(struct chameleon_bar), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		bar_count = 1;
		c->addr = mapbase;
	}

	*cb = c;

	return bar_count;
}

/**
 * chameleon_parse_cells() - Parse cells from the chameleon table
 * @bus: the mcb_bus the found devices are added to
 * @mapbase: physical address of the chameleon table
 * @base: ioremapped virtual address of the chameleon table
 *
 * Return: the number of cells found on success, a negative errno otherwise.
 */
int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
			void __iomem *base)
{
	struct chameleon_fpga_header *header;
	struct chameleon_bar *cb;
	char __iomem *p = base;
	int num_cells = 0;
	uint32_t dtype;
	int bar_count;
	int ret;
	u32 hsize;

	hsize = sizeof(struct chameleon_fpga_header);

	header = kzalloc(hsize, GFP_KERNEL);
	if (!header)
		return -ENOMEM;

	/* Extract header information */
	memcpy_fromio(header, p, hsize);
	/* We only support chameleon v2 at the moment */
	header->magic = le16_to_cpu(header->magic);
	if (header->magic != CHAMELEONV2_MAGIC) {
		pr_err("Unsupported chameleon version 0x%x\n",
		       header->magic);
		ret = -ENODEV;
		goto free_header;
	}
	p += hsize;

	bus->revision = header->revision;
	bus->model = header->model;
	bus->minor = header->minor;
	snprintf(bus->name, CHAMELEON_FILENAME_LEN + 1, "%s",
		 header->filename);

	bar_count = chameleon_get_bar(&p, mapbase, &cb);
	if (bar_count < 0) {
		ret = bar_count;
		goto free_header;
	}

	for_each_chameleon_cell(dtype, p) {
		switch (dtype) {
		case CHAMELEON_DTYPE_GENERAL:
			ret = chameleon_parse_gdd(bus, cb, p, bar_count);
			if (ret < 0)
				goto free_bar;
			p += sizeof(struct chameleon_gdd);
			break;
		case CHAMELEON_DTYPE_BRIDGE:
			chameleon_parse_bdd(bus, cb, p);
			p += sizeof(struct chameleon_bdd);
			break;
		case CHAMELEON_DTYPE_END:
			break;
		default:
			pr_err("Invalid chameleon descriptor type 0x%x\n",
			       dtype);
			ret = -EINVAL;
			goto free_bar;
		}
		num_cells++;
	}

	if (num_cells == 0)
		num_cells = -EINVAL;

	kfree(cb);
	kfree(header);
	return num_cells;

free_bar:
	kfree(cb);
free_header:
	kfree(header);

	return ret;
}
EXPORT_SYMBOL_GPL(chameleon_parse_cells);