/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * This driver exposes a special register which can be used for fast
 * synchronization across a large SMP machine. The hardware is exposed
 * as an array of bytes where each process will write to one of the bytes
 * to indicate it has finished the current stage, and this update is
 * broadcast to all processors without having to bounce a cacheline
 * between them. In POWER5 and POWER6 there is one of these registers per
 * SMP, but it is presented in two forms: first it is given as a whole,
 * and then as a number of smaller registers which alias to parts of the
 * single whole register. This can potentially allow multiple groups of
 * processes to each have their own private synchronization device.
 *
 * Note that this hardware *must* be written to using *only* single-byte
 * writes. It may be read using 1-, 2-, 4-, or 8-byte loads, which must be
 * aligned since this region is treated as cache-inhibited. Processes
 * should also use a full sync before and after writing to the BSR to
 * ensure all stores and the BSR update have made it to all chips in the
 * system.
 */
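
/*
 * Illustrative user-space usage (a minimal sketch only, not part of the
 * driver; needs <fcntl.h> and <sys/mman.h>). The device path, the map
 * length, the participant index "my_id", and the ALL_DONE value are
 * assumptions made up for the example; the single-byte store, the aligned
 * load, and the surrounding syncs follow the rules described above.
 *
 *	int fd = open("/dev/bsr64_0", O_RDWR);	// path is an assumption
 *	size_t len = sysconf(_SC_PAGESIZE);	// see bsr_mmap() below
 *	volatile unsigned char *bsr = mmap(NULL, len,
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	__asm__ __volatile__("sync" : : : "memory");	// full sync before
 *	bsr[my_id] = 1;					// single-byte write only
 *	__asm__ __volatile__("sync" : : : "memory");	// full sync after
 *
 *	// aligned 8-byte load to poll the first eight bytes at once
 *	while (*(volatile unsigned long long *)bsr != ALL_DONE)
 *		;
 */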

/* This is an arbitrary number; up to POWER6 it's been 17 or fewer. */
#define BSR_MAX_DEVS (32)

struct bsr_dev {
	u64      bsr_addr;     /* Real address */
	u64      bsr_len;      /* length of mem region we can map */
	unsigned bsr_bytes;    /* size of the BSR reg itself */
	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
	unsigned bsr_type;     /* maps to enum below */
	unsigned bsr_num;      /* bsr id number for its type */
	int      bsr_minor;

	struct list_head bsr_list;

	dev_t    bsr_dev;
	struct cdev bsr_cdev;
	struct device *bsr_device;
	char     bsr_name[32];

};

static unsigned total_bsr_devs;
static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;

enum {
	BSR_8    = 0,
	BSR_16   = 1,
	BSR_64   = 2,
	BSR_128  = 3,
	BSR_4096 = 4,
	BSR_UNKNOWN = 5,
	BSR_MAX  = 6,
};

static unsigned bsr_types[BSR_MAX];

static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}

static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}

static ssize_t
bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}

static struct device_attribute bsr_dev_attrs[] = {
	__ATTR(bsr_size,   S_IRUGO, bsr_size_show,   NULL),
	__ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL),
	__ATTR(bsr_length, S_IRUGO, bsr_len_show,    NULL),
	__ATTR_NULL
};

static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct bsr_dev *dev = filp->private_data;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* check for the case of a small BSR device and map one 4k page for it */
	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
				   vma->vm_page_prot);
	else if (size <= dev->bsr_len)
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 dev->bsr_addr >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
	else
		return -EINVAL;

	if (ret)
		return -EAGAIN;

	return 0;
}

static int bsr_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);

	filp->private_data = dev;
	return 0;
}

static const struct file_operations bsr_fops = {
	.owner  = THIS_MODULE,
	.mmap   = bsr_mmap,
	.open   = bsr_open,
	.llseek = noop_llseek,
};

static void bsr_cleanup_devs(void)
{
	struct bsr_dev *cur, *n;

	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
		if (cur->bsr_device) {
			cdev_del(&cur->bsr_cdev);
			device_del(cur->bsr_device);
		}
		list_del(&cur->bsr_list);
		kfree(cur);
	}
}
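
/*
 * Create one character device per BSR described by an "ibm,bsr"
 * device-tree node: the i-th "reg" entry supplies the address range,
 * while the i-th entries of "ibm,#lock-bytes" and "ibm,lock-stride"
 * supply the register size and stride advertised through sysfs.
 */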
static int bsr_add_node(struct device_node *bn)
{
	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
	const u32 *bsr_stride;
	const u32 *bsr_bytes;
	unsigned i;
	int ret = -ENODEV;

	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
	bsr_bytes  = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);

	if (!bsr_stride || !bsr_bytes ||
	    (bsr_stride_len != bsr_bytes_len)) {
		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
		return ret;
	}

	num_bsr_devs = bsr_bytes_len / sizeof(u32);

	for (i = 0 ; i < num_bsr_devs; i++) {
		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
					      GFP_KERNEL);
		struct resource res;
		int result;

		if (!cur) {
			printk(KERN_ERR "Unable to alloc bsr dev\n");
			ret = -ENOMEM;
			goto out_err;
		}

		result = of_address_to_resource(bn, i, &res);
		if (result < 0) {
			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
			kfree(cur);
			continue;
		}

		cur->bsr_minor  = i + total_bsr_devs;
		cur->bsr_addr   = res.start;
		cur->bsr_len    = res.end - res.start + 1;
		cur->bsr_bytes  = bsr_bytes[i];
		cur->bsr_stride = bsr_stride[i];
		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

		/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
		/* we can only map 4k of it, so only advertise the 4k in sysfs */
		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
			cur->bsr_len = 4096;

		switch(cur->bsr_bytes) {
		case 8:
			cur->bsr_type = BSR_8;
			break;
		case 16:
			cur->bsr_type = BSR_16;
			break;
		case 64:
			cur->bsr_type = BSR_64;
			break;
		case 128:
			cur->bsr_type = BSR_128;
			break;
		case 4096:
			cur->bsr_type = BSR_4096;
			break;
		default:
			cur->bsr_type = BSR_UNKNOWN;
		}

		cur->bsr_num = bsr_types[cur->bsr_type];
		snprintf(cur->bsr_name, 32, "bsr%d_%d",
			 cur->bsr_bytes, cur->bsr_num);

		cdev_init(&cur->bsr_cdev, &bsr_fops);
		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
		if (result) {
			kfree(cur);
			goto out_err;
		}

		cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
						cur, cur->bsr_name);
		if (IS_ERR(cur->bsr_device)) {
			printk(KERN_ERR "device_create failed for %s\n",
			       cur->bsr_name);
			cdev_del(&cur->bsr_cdev);
			kfree(cur);
			goto out_err;
		}

		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
		list_add_tail(&cur->bsr_list, &bsr_devs);
	}

	total_bsr_devs += num_bsr_devs;

	return 0;

 out_err:

	bsr_cleanup_devs();
	return ret;
}

static int bsr_create_devs(struct device_node *bn)
{
	int ret;

	while (bn) {
		ret = bsr_add_node(bn);
		if (ret) {
			of_node_put(bn);
			return ret;
		}
		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
	}
	return 0;
}
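
/*
 * Module init: register the "bsr" class and a character device region,
 * then create devices for every "ibm,bsr" node found in the device tree.
 */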
static int __init bsr_init(void)
{
	struct device_node *np;
	dev_t bsr_dev;
	int ret = -ENODEV;
	int result;

	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
	if (!np)
		goto out_err;

	bsr_class = class_create(THIS_MODULE, "bsr");
	if (IS_ERR(bsr_class)) {
		printk(KERN_ERR "class_create() failed for bsr_class\n");
		goto out_err_1;
	}
	bsr_class->dev_attrs = bsr_dev_attrs;

	result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	bsr_major = MAJOR(bsr_dev);
	if (result < 0) {
		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
		goto out_err_2;
	}

	if ((ret = bsr_create_devs(np)) < 0) {
		np = NULL;
		goto out_err_3;
	}

	return 0;

 out_err_3:
	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);

 out_err_2:
	class_destroy(bsr_class);

 out_err_1:
	of_node_put(np);

 out_err:

	return ret;
}

static void __exit bsr_exit(void)
{

	bsr_cleanup_devs();

	if (bsr_class)
		class_destroy(bsr_class);

	if (bsr_major)
		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}

module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");