// SPDX-License-Identifier: GPL-2.0-or-later
/* IBM POWER Barrier Synchronization Register Driver
 *
 * Copyright IBM Corporation 2008
 *
 * Author: Sonny Rao <sonnyrao@us.ibm.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/io.h>

/*
 This driver exposes a special register which can be used for fast
 synchronization across a large SMP machine.  The hardware is exposed
 as an array of bytes where each process will write to one of the bytes to
 indicate it has finished the current stage, and this update is broadcast to
 all processors without having to bounce a cacheline between them.  In
 POWER5 and POWER6 there is one of these registers per SMP system, but it is
 presented in two forms; first, it is given as a whole and then as a number
 of smaller registers which alias to parts of the single whole register.
 This can potentially allow multiple groups of processes to each have their
 own private synchronization device.

 Note that this hardware *must* be written to using *only* single-byte writes.
 It may be read using 1, 2, 4, or 8 byte loads, which must be aligned since
 this region is treated as cache-inhibited.  Processes should also issue a
 full sync before and after writing to the BSR to ensure all stores and
 the BSR update have made it to all chips in the system.
*/
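
/*
 * Illustrative userspace sketch (not part of this driver): a process would
 * typically open and mmap() one of the character devices created below and
 * then follow the protocol described above.  The device name, map_len, and
 * my_byte below are placeholders rather than values mandated by the driver;
 * map_len must satisfy the checks enforced in bsr_mmap().
 *
 *	int fd = open("/dev/bsr8_0", O_RDWR);
 *	volatile unsigned char *bsr = mmap(NULL, map_len,
 *			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	__asm__ __volatile__("sync" ::: "memory");	// order earlier stores
 *	bsr[my_byte] = 1;				// single-byte write only
 *	__asm__ __volatile__("sync" ::: "memory");	// push out the update
 */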

/* This is an arbitrary number; up through POWER6 it has been 17 or fewer. */
#define BSR_MAX_DEVS (32)

struct bsr_dev {
	u64      bsr_addr;	/* Real address */
	u64      bsr_len;	/* length of mem region we can map */
	unsigned bsr_bytes;	/* size of the BSR reg itself */
	unsigned bsr_stride;	/* interval at which BSR repeats in the page */
	unsigned bsr_type;	/* maps to enum below */
	unsigned bsr_num;	/* bsr id number for its type */
	int      bsr_minor;

	struct list_head bsr_list;

	dev_t    bsr_dev;
	struct cdev bsr_cdev;
	struct device *bsr_device;
	char     bsr_name[32];
};

static unsigned total_bsr_devs;
static LIST_HEAD(bsr_devs);
static int bsr_major;

enum {
	BSR_8    = 0,
	BSR_16   = 1,
	BSR_64   = 2,
	BSR_128  = 3,
	BSR_4096 = 4,
	BSR_UNKNOWN = 5,
	BSR_MAX  = 6,
};

static unsigned bsr_types[BSR_MAX];

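/*
 * Read-only sysfs attributes exposing the per-device geometry
 * (bsr_size, bsr_stride, bsr_length) under /sys/class/bsr/<dev>/.
 */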
static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
static DEVICE_ATTR_RO(bsr_size);

static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
static DEVICE_ATTR_RO(bsr_stride);

static ssize_t
bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
static DEVICE_ATTR_RO(bsr_length);

static struct attribute *bsr_dev_attrs[] = {
	&dev_attr_bsr_size.attr,
	&dev_attr_bsr_stride.attr,
	&dev_attr_bsr_length.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bsr_dev);

static const struct class bsr_class = {
	.name		= "bsr",
	.dev_groups	= bsr_dev_groups,
};

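/*
 * Map the BSR into a user address space.  For a BSR smaller than PAGE_SIZE
 * a single 4K hardware page is handed out via remap_4k_pfn(); otherwise the
 * requested size must fit within the device's memory region.
 */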
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct bsr_dev *dev = filp->private_data;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* check for the case of a small BSR device and map one 4k page for it */
	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
				   vma->vm_page_prot);
	else if (size <= dev->bsr_len)
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 dev->bsr_addr >> PAGE_SHIFT,
					 size, vma->vm_page_prot);
	else
		return -EINVAL;

	if (ret)
		return -EAGAIN;

	return 0;
}

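/* Stash the per-device state so bsr_mmap() can find it via private_data. */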
static int bsr_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);

	filp->private_data = dev;
	return 0;
}

static const struct file_operations bsr_fops = {
	.owner	= THIS_MODULE,
	.mmap	= bsr_mmap,
	.open	= bsr_open,
	.llseek	= noop_llseek,
};

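/* Tear down every device we created: cdev, struct device, list entry. */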
static void bsr_cleanup_devs(void)
{
	struct bsr_dev *cur, *n;

	list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
		if (cur->bsr_device) {
			cdev_del(&cur->bsr_cdev);
			device_del(cur->bsr_device);
		}
		list_del(&cur->bsr_list);
		kfree(cur);
	}
}

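/*
 * Parse one "ibm,bsr" node: the "ibm,lock-stride" and "ibm,#lock-bytes"
 * properties describe each BSR instance, and every reg entry becomes a
 * separate character device named bsr<bytes>_<instance>.
 */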
static int bsr_add_node(struct device_node *bn)
{
	int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
	const u32 *bsr_stride;
	const u32 *bsr_bytes;
	unsigned i;
	int ret = -ENODEV;

	bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
	bsr_bytes  = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);

	if (!bsr_stride || !bsr_bytes ||
	    (bsr_stride_len != bsr_bytes_len)) {
		printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
		return ret;
	}

	num_bsr_devs = bsr_bytes_len / sizeof(u32);

	for (i = 0; i < num_bsr_devs; i++) {
		struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
					      GFP_KERNEL);
		struct resource res;
		int result;

		if (!cur) {
			printk(KERN_ERR "Unable to alloc bsr dev\n");
			ret = -ENOMEM;
			goto out_err;
		}

		result = of_address_to_resource(bn, i, &res);
		if (result < 0) {
			printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
			kfree(cur);
			continue;
		}

		cur->bsr_minor  = i + total_bsr_devs;
		cur->bsr_addr   = res.start;
		cur->bsr_len    = resource_size(&res);
		cur->bsr_bytes  = bsr_bytes[i];
		cur->bsr_stride = bsr_stride[i];
		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

		/*
		 * If bsr_len is > 4k and less than PAGE_SIZE (64k pages),
		 * we can only map 4k of it, so only advertise the 4k in sysfs.
		 */
		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
			cur->bsr_len = 4096;

		switch (cur->bsr_bytes) {
		case 8:
			cur->bsr_type = BSR_8;
			break;
		case 16:
			cur->bsr_type = BSR_16;
			break;
		case 64:
			cur->bsr_type = BSR_64;
			break;
		case 128:
			cur->bsr_type = BSR_128;
			break;
		case 4096:
			cur->bsr_type = BSR_4096;
			break;
		default:
			cur->bsr_type = BSR_UNKNOWN;
		}

		cur->bsr_num = bsr_types[cur->bsr_type];
		snprintf(cur->bsr_name, 32, "bsr%d_%d",
			 cur->bsr_bytes, cur->bsr_num);

		cdev_init(&cur->bsr_cdev, &bsr_fops);
		result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
		if (result) {
			kfree(cur);
			goto out_err;
		}

		cur->bsr_device = device_create(&bsr_class, NULL, cur->bsr_dev,
						cur, "%s", cur->bsr_name);
		if (IS_ERR(cur->bsr_device)) {
			printk(KERN_ERR "device_create failed for %s\n",
			       cur->bsr_name);
			cdev_del(&cur->bsr_cdev);
			kfree(cur);
			goto out_err;
		}

		bsr_types[cur->bsr_type] = cur->bsr_num + 1;
		list_add_tail(&cur->bsr_list, &bsr_devs);
	}

	total_bsr_devs += num_bsr_devs;

	return 0;

 out_err:

	bsr_cleanup_devs();
	return ret;
}

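/* Walk all "ibm,bsr" compatible nodes, starting from the one passed in. */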
static int bsr_create_devs(struct device_node *bn)
{
	int ret;

	while (bn) {
		ret = bsr_add_node(bn);
		if (ret) {
			of_node_put(bn);
			return ret;
		}
		bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
	}
	return 0;
}

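/*
 * Register the "bsr" class, reserve a char major with BSR_MAX_DEVS minors,
 * and create a device for every BSR described in the device tree.
 */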
static int __init bsr_init(void)
{
	struct device_node *np;
	dev_t bsr_dev;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
	if (!np)
		goto out_err;

	ret = class_register(&bsr_class);
	if (ret)
		goto out_err_1;

	ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
	bsr_major = MAJOR(bsr_dev);
	if (ret < 0) {
		printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
		goto out_err_2;
	}

	ret = bsr_create_devs(np);
	if (ret < 0) {
		np = NULL;
		goto out_err_3;
	}

	return 0;

 out_err_3:
	unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);

 out_err_2:
	class_unregister(&bsr_class);

 out_err_1:
	of_node_put(np);

 out_err:
	return ret;
}

static void __exit bsr_exit(void)
{
	bsr_cleanup_devs();

	class_unregister(&bsr_class);

	if (bsr_major)
		unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}

module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");