// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 * are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING	"little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING	"big"
#else
#error Unknown byteorder
#endif

#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

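/*
 * Illustration (not part of the original source): KERNEL_ATTR_RO(fscaps)
 * expands to roughly
 *
 *	static struct kobj_attribute fscaps_attr =
 *		__ATTR(fscaps, 0444, fscaps_show, NULL);
 *
 * i.e. a read-only attribute wired to fscaps_show(), while KERNEL_ATTR_RW()
 * additionally hooks up a <name>_store() handler with mode 0644.
 */
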
/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);

/* address bits */
static ssize_t address_bits_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);

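/*
 * Illustrative userspace view (not in the original file): these read-only
 * attributes appear as plain text files, e.g. on a 64-bit little-endian
 * machine something like
 *
 *	$ cat /sys/kernel/cpu_byteorder
 *	little
 *	$ cat /sys/kernel/address_bits
 *	64
 */
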
#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

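/*
 * Illustrative usage (not in the original file): the helper path can be
 * replaced or cleared from userspace, for example
 *
 *	# echo /sbin/hotplug > /sys/kernel/uevent_helper
 *	# echo "" > /sys/kernel/uevent_helper	# disable the helper
 *
 * The trailing newline written by echo is stripped by uevent_helper_store()
 * above.
 */
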
#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const. It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
#endif

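/*
 * Illustrative usage (not in the original file): the string written here is
 * handed to profile_setup(), i.e. it takes the same form as the "profile="
 * boot parameter, e.g.
 *
 *	# echo 2 > /sys/kernel/profiling
 *
 * Profiling can only be enabled once this way; a second write returns
 * -EEXIST.
 */
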
#ifdef CONFIG_KEXEC_CORE
static ssize_t kexec_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

static ssize_t kexec_crash_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)
		return size;

	return sysfs_emit(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_KEXEC_CORE */

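/*
 * Illustrative usage (not in the original file): the reserved crashkernel
 * memory can be inspected and shrunk (never grown) at runtime, e.g.
 *
 *	# cat /sys/kernel/kexec_crash_size
 *	268435456
 *	# echo 0 > /sys/kernel/kexec_crash_size	# release all of it
 *
 * kexec_loaded and kexec_crash_loaded simply report 0 or 1.
 */
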
#ifdef CONFIG_CRASH_CORE

static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
	return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
			  (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	unsigned int sz = crash_get_elfcorehdr_size();

	return sysfs_emit(buf, "%u\n", sz);
}
KERNEL_ATTR_RO(crash_elfcorehdr_size);

#endif

#endif /* CONFIG_CRASH_CORE */

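/*
 * Illustrative output (not in the original file): /sys/kernel/vmcoreinfo
 * prints the physical address and size (hex) of the vmcoreinfo ELF note as
 * "<paddr> <size>", e.g. something like
 *
 *	$ cat /sys/kernel/vmcoreinfo
 *	0x000000005ffd3000 1000
 *
 * which kexec/kdump tooling uses to locate the note for the crash kernel.
 */
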
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_expedited))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_normal))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */

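/*
 * Illustrative usage (not in the original file): these two knobs mirror the
 * rcupdate.rcu_expedited / rcupdate.rcu_normal boot parameters and can be
 * flipped at runtime, e.g.
 *
 *	# echo 1 > /sys/kernel/rcu_expedited	# favor expedited grace periods
 *	# echo 0 > /sys/kernel/rcu_expedited
 */
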
/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes __weak;
extern const void __stop_notes __weak;
#define notes_size (&__stop_notes - &__start_notes)

static ssize_t notes_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	memcpy(buf, &__start_notes + off, count);
	return count;
}

static struct bin_attribute notes_attr __ro_after_init = {
	.attr = {
		.name = "notes",
		.mode = S_IRUGO,
	},
	.read = &notes_read,
};
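
/*
 * Illustrative detail (not in the original file): /sys/kernel/notes is a
 * binary file that exposes the kernel image's .notes ELF section verbatim
 * (ELF note records such as the GNU build ID), so it is typically inspected
 * with a hex dump rather than cat, e.g.
 *
 *	$ xxd /sys/kernel/notes | head
 */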

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
	&kexec_loaded_attr.attr,
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
#endif
#ifdef CONFIG_CRASH_CORE
	&vmcoreinfo_attr.attr,
#ifdef CONFIG_CRASH_HOTPLUG
	&crash_elfcorehdr_size_attr.attr,
#endif
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};

static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		notes_attr.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

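/*
 * Note (not in the original file): core_initcall() runs this at initcall
 * level 1, so /sys/kernel and its attributes exist before most subsystems
 * and device drivers initialize.
 */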
core_initcall(ksysfs_init);