// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/io.h>

/*
 * This driver exposes a special register which can be used for fast
 * synchronization across a large SMP machine.  The hardware is exposed
 * as an array of bytes where each process will write to one of the bytes to
 * indicate it has finished the current stage, and this update is broadcast
 * to all processors without having to bounce a cacheline between them.  On
 * POWER5 and POWER6 there is one of these registers per SMP system, but it
 * is presented in two forms: first as a whole, and then as a number of
 * smaller registers which alias to parts of the single whole register.
 * This can potentially allow multiple groups of processes to each have
 * their own private synchronization device.
 *
 * Note that this hardware *must* be written to using *only* single-byte
 * writes.  It may be read using 1-, 2-, 4-, or 8-byte loads, which must be
 * aligned since this region is treated as cache-inhibited.  Processes
 * should also use a full sync before and after writing to the BSR to ensure
 * all stores and the BSR update have made it to all chips in the system.
 */
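
/*
 * Illustrative userspace sketch (not part of this driver): one stage of a
 * barrier built on a BSR device, following the access rules above.  This is
 * a minimal example under assumptions -- the device name /dev/bsr8_0, the
 * participant count "nprocs", and the helper name "bsr_barrier_stage" are
 * hypothetical, and a real user would also have to handle resetting the
 * bytes between stages.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static volatile uint8_t *bsr;	// mmap()ed BSR bytes
 *
 *	// setup, once per process:
 *	//	int fd = open("/dev/bsr8_0", O_RDWR);
 *	//	bsr = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
 *	//		   MAP_SHARED, fd, 0);
 *
 *	static void bsr_barrier_stage(int my_id, int nprocs)
 *	{
 *		int i, done;
 *
 *		__asm__ __volatile__("sync" ::: "memory");
 *		bsr[my_id] = 1;			// single-byte store only
 *		__asm__ __volatile__("sync" ::: "memory");
 *		do {				// aligned single-byte loads
 *			done = 1;
 *			for (i = 0; i < nprocs; i++)
 *				if (!bsr[i])
 *					done = 0;
 *		} while (!done);
 *	}
 */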

/* This is an arbitrary number; up to POWER6 it has been 17 or fewer. */
#define BSR_MAX_DEVS (32)

struct bsr_dev {
	u64      bsr_addr;     /* Real address */
	u64      bsr_len;      /* length of mem region we can map */
	unsigned bsr_bytes;    /* size of the BSR reg itself */
	unsigned bsr_stride;   /* interval at which BSR repeats in the page */
	unsigned bsr_type;     /* maps to enum below */
	unsigned bsr_num;      /* bsr id number for its type */
	int      bsr_minor;

	struct list_head bsr_list;

	dev_t    bsr_dev;
	struct cdev bsr_cdev;
	struct device *bsr_device;
	char     bsr_name[32];

};

static unsigned total_bsr_devs;
static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;

enum {
	BSR_8    = 0,
	BSR_16   = 1,
	BSR_64   = 2,
	BSR_128  = 3,
	BSR_4096 = 4,
	BSR_UNKNOWN = 5,
	BSR_MAX  = 6,
};

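/* Count of devices seen per BSR type; used to number nodes as bsr<bytes>_<n> */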
static unsigned bsr_types[BSR_MAX];

static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
static DEVICE_ATTR_RO(bsr_size);

static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
static DEVICE_ATTR_RO(bsr_stride);

static ssize_t
bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
static DEVICE_ATTR_RO(bsr_length);

static struct attribute *bsr_dev_attrs[] = {
	&dev_attr_bsr_size.attr,
	&dev_attr_bsr_stride.attr,
	&dev_attr_bsr_length.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bsr_dev);

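/*
 * mmap the BSR register.  A BSR smaller than the kernel page size is mapped
 * as a single 4k page via remap_4k_pfn(); otherwise the requested size is
 * mapped directly, up to bsr_len bytes.  Mappings are always cache-inhibited.
 */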
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct bsr_dev *dev = filp->private_data;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* check for the case of a small BSR device and map one 4k page for it */
	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
				   vma->vm_page_prot);
	else if (size <= dev->bsr_len)
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 dev->bsr_addr >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
	else
		return -EINVAL;

	if (ret)
		return -EAGAIN;

	return 0;
}

static int bsr_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);

	filp->private_data = dev;
	return 0;
}

static const struct file_operations bsr_fops = {
	.owner = THIS_MODULE,
	.mmap  = bsr_mmap,
	.open  = bsr_open,
	.llseek = noop_llseek,
};

static void bsr_cleanup_devs(void)
{
	struct bsr_dev *cur, *n;

	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
		if (cur->bsr_device) {
			cdev_del(&cur->bsr_cdev);
			device_del(cur->bsr_device);
		}
		list_del(&cur->bsr_list);
		kfree(cur);
	}
}

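/*
 * Create one character device per "reg" entry of an ibm,bsr device-tree
 * node.  The ibm,lock-stride and ibm,#lock-bytes properties supply one u32
 * per device giving its stride and register size.
 */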
static int bsr_add_node(struct device_node *bn)
{
	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
	const u32 *bsr_stride;
	const u32 *bsr_bytes;
	unsigned i;
	int ret = -ENODEV;

	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
	bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);

	if (!bsr_stride || !bsr_bytes ||
	    (bsr_stride_len != bsr_bytes_len)) {
		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
		return ret;
	}

	num_bsr_devs = bsr_bytes_len / sizeof(u32);

	for (i = 0 ; i < num_bsr_devs; i++) {
		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
					      GFP_KERNEL);
		struct resource res;
		int result;

		if (!cur) {
			printk(KERN_ERR "Unable to alloc bsr dev\n");
			ret = -ENOMEM;
			goto out_err;
		}

		result = of_address_to_resource(bn, i, &res);
		if (result < 0) {
			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
			kfree(cur);
			continue;
		}

		cur->bsr_minor  = i + total_bsr_devs;
		cur->bsr_addr   = res.start;
		cur->bsr_len    = resource_size(&res);
		cur->bsr_bytes  = bsr_bytes[i];
		cur->bsr_stride = bsr_stride[i];
		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

		/*
		 * If we have a bsr_len of > 4k and less than PAGE_SIZE (64k
		 * pages), we can only map 4k of it, so only advertise the 4k
		 * in sysfs.
		 */
		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
			cur->bsr_len = 4096;

		switch (cur->bsr_bytes) {
		case 8:
			cur->bsr_type = BSR_8;
			break;
		case 16:
			cur->bsr_type = BSR_16;
			break;
		case 64:
			cur->bsr_type = BSR_64;
			break;
		case 128:
			cur->bsr_type = BSR_128;
			break;
		case 4096:
			cur->bsr_type = BSR_4096;
			break;
		default:
			cur->bsr_type = BSR_UNKNOWN;
		}

		cur->bsr_num = bsr_types[cur->bsr_type];
		snprintf(cur->bsr_name, 32, "bsr%d_%d",
			 cur->bsr_bytes, cur->bsr_num);

		cdev_init(&cur->bsr_cdev, &bsr_fops);
		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
		if (result) {
			kfree(cur);
			goto out_err;
		}

		cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
						cur, "%s", cur->bsr_name);
		if (IS_ERR(cur->bsr_device)) {
			printk(KERN_ERR "device_create failed for %s\n",
			       cur->bsr_name);
			cdev_del(&cur->bsr_cdev);
			kfree(cur);
			goto out_err;
		}

		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
		list_add_tail(&cur->bsr_list, &bsr_devs);
	}

	total_bsr_devs += num_bsr_devs;

	return 0;

 out_err:

	bsr_cleanup_devs();
	return ret;
}

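/*
 * Walk all ibm,bsr nodes, starting from the one passed in.  Note that
 * of_find_compatible_node() drops the reference on the node it is given,
 * and the error path puts the current node before returning.
 */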
static int bsr_create_devs(struct device_node *bn)
{
	int ret;

	while (bn) {
		ret = bsr_add_node(bn);
		if (ret) {
			of_node_put(bn);
			return ret;
		}
		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
	}
	return 0;
}

static int __init bsr_init(void)
{
	struct device_node *np;
	dev_t bsr_dev;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
	if (!np)
		goto out_err;

	bsr_class = class_create(THIS_MODULE, "bsr");
	if (IS_ERR(bsr_class)) {
		printk(KERN_ERR "class_create() failed for bsr_class\n");
		ret = PTR_ERR(bsr_class);
		goto out_err_1;
	}
	bsr_class->dev_groups = bsr_dev_groups;

	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	bsr_major = MAJOR(bsr_dev);
	if (ret < 0) {
		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
		goto out_err_2;
	}

	ret = bsr_create_devs(np);
	if (ret < 0) {
		/* bsr_create_devs() has already dropped our reference to np */
		np = NULL;
		goto out_err_3;
	}

	return 0;

 out_err_3:
	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);

 out_err_2:
	class_destroy(bsr_class);

 out_err_1:
	of_node_put(np);

 out_err:

	return ret;
}

static void __exit bsr_exit(void)
{

	bsr_cleanup_devs();

	if (bsr_class)
		class_destroy(bsr_class);

	if (bsr_major)
		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}

module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");