// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 * are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING	"little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING	"big"
#else
#error Unknown byteorder
#endif

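/*
 * Declare a static kobj_attribute for a read-only or read-write
 * /sys/kernel file, wired up to the matching <name>_show() and
 * <name>_store() handlers.
 */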
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum));
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);

/* address bits */
static ssize_t address_bits_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
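/*
 * Kernel profiling control: reads report prof_on; a write enables
 * profiling once by passing the string to profile_setup() and creating
 * /proc/profile, and returns -EEXIST if profiling is already on.
 */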
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const.  It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
#endif

#ifdef CONFIG_KEXEC_CORE
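/* whether a kexec kernel image is loaded */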
static ssize_t kexec_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

#ifdef CONFIG_CRASH_DUMP
static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

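/*
 * Size of the crash kernel memory reservation, in bytes.  A write passes
 * the new value to crash_shrink_memory() to shrink the reservation.
 */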
static ssize_t kexec_crash_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)
		return size;

	return sysfs_emit(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_CRASH_DUMP */
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_VMCORE_INFO

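/* physical address and size of the vmcoreinfo ELF note */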
static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
	return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
			  (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#ifdef CONFIG_CRASH_HOTPLUG
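/* bytes reserved for the crash ELF core header */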
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	unsigned int sz = crash_get_elfcorehdr_size();

	return sysfs_emit(buf, "%u\n", sz);
}
KERNEL_ATTR_RO(crash_elfcorehdr_size);

#endif

#endif /* CONFIG_VMCORE_INFO */

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
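/*
 * Writable knobs behind /sys/kernel/rcu_expedited and /sys/kernel/rcu_normal
 * (see <linux/rcupdate.h>); values are parsed with kstrtoint() and read
 * with READ_ONCE().
 */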
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_expedited))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_normal))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes __weak;
extern const void __stop_notes __weak;
#define notes_size (&__stop_notes - &__start_notes)

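/*
 * Copy @count bytes of the .notes section, starting at @off, into the
 * sysfs buffer.  Reads are clamped to notes_attr.size (set in
 * ksysfs_init()), so no extra bounds checking is needed here.
 */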
static ssize_t notes_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	memcpy(buf, &__start_notes + off, count);
	return count;
}

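/* read-only binary attribute exposing the .notes section as /sys/kernel/notes */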
static struct bin_attribute notes_attr __ro_after_init = {
	.attr = {
		.name = "notes",
		.mode = S_IRUGO,
	},
	.read = &notes_read,
};

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

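/*
 * Default attributes exported under /sys/kernel; entries guarded by a
 * config option are only present when that feature is built in.
 */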
static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
	&kexec_loaded_attr.attr,
#ifdef CONFIG_CRASH_DUMP
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
#endif
#endif
#ifdef CONFIG_VMCORE_INFO
	&vmcoreinfo_attr.attr,
#ifdef CONFIG_CRASH_HOTPLUG
	&crash_elfcorehdr_size_attr.attr,
#endif
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};

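/*
 * Create the /sys/kernel kobject and register the attribute group above.
 * If the kernel image has a non-empty .notes section, also expose it as
 * the "notes" binary attribute.
 */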
static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		notes_attr.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

core_initcall(ksysfs_init);