// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 * are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING "little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING "big"
#else
#error Unknown byteorder
#endif

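/*
 * KERNEL_ATTR_RO()/KERNEL_ATTR_RW() define a static struct kobj_attribute
 * named <name>_attr; the __ATTR_RO()/__ATTR_RW() helpers wire its ->show
 * (and, for the RW variant, ->store) callbacks to <name>_show() and
 * <name>_store().
 */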
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum));
}
KERNEL_ATTR_RO(uevent_seqnum);
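
/*
 * /sys/kernel/uevent_seqnum reports the sequence number of the most
 * recently emitted uevent; it corresponds to the SEQNUM= value carried in
 * the uevent environment, which userspace (e.g. udev) can use to order
 * events.
 */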

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);
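
/*
 * Reading /sys/kernel/cpu_byteorder returns "little" or "big" according
 * to the endianness the kernel was built for.
 */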

/* address bits */
static ssize_t address_bits_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
}
KERNEL_ATTR_RO(address_bits);
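
/*
 * /sys/kernel/address_bits reports the native pointer width in bits,
 * e.g. 64 on a 64-bit kernel.
 */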

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        if (count+1 > UEVENT_HELPER_PATH_LEN)
                return -ENOENT;
        memcpy(uevent_helper, buf, count);
        uevent_helper[count] = '\0';
        if (count && uevent_helper[count-1] == '\n')
                uevent_helper[count-1] = '\0';
        return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif
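
/*
 * Writing a path to /sys/kernel/uevent_helper replaces the uevent helper
 * binary (the same buffer is exposed as the kernel.hotplug sysctl). The
 * store above rejects strings that do not fit UEVENT_HELPER_PATH_LEN and
 * strips a single trailing newline; writing an empty string disables the
 * helper.
 */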

#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        int ret;
        static DEFINE_MUTEX(lock);

        /*
         * We need serialization here: profile_setup() initializes prof_on,
         * and profile_init() must not reallocate prof_buffer once it has
         * been allocated.
         */
        guard(mutex)(&lock);
        if (prof_on)
                return -EEXIST;
        /*
         * This eventually calls into get_option(), which has a ton of
         * callers and is not const. It is easiest to cast the const away
         * here.
         */
        profile_setup((char *)buf);
        ret = profile_init();
        if (ret)
                return ret;
        ret = create_proc_profile();
        if (ret)
                return ret;
        return count;
}
KERNEL_ATTR_RW(profiling);
#endif
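
/*
 * Writing to /sys/kernel/profiling enables kernel profiling at runtime;
 * the written string is parsed like the "profile=" boot parameter (for
 * example "2" selects CPU profiling with a shift of 2), and the results
 * are then available from /proc/profile. Profiling can only be enabled
 * once: further writes fail with -EEXIST.
 */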

#ifdef CONFIG_KEXEC_CORE
static ssize_t kexec_loaded_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

#ifdef CONFIG_CRASH_DUMP
static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

static ssize_t kexec_crash_size_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        ssize_t size = crash_get_memory_size();

        if (size < 0)
                return size;

        return sysfs_emit(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        unsigned long cnt;
        int ret;

        if (kstrtoul(buf, 0, &cnt))
                return -EINVAL;

        ret = crash_shrink_memory(cnt);
        return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);

#endif /* CONFIG_CRASH_DUMP */
#endif /* CONFIG_KEXEC_CORE */
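
/*
 * kexec_loaded and kexec_crash_loaded are simple 0/1 flags telling whether
 * a normal or crash kexec image has been loaded. kexec_crash_size shows
 * the size of the crashkernel memory reservation and accepts a smaller
 * value to shrink that reservation and return the freed memory to the
 * system.
 */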

#ifdef CONFIG_VMCORE_INFO

static ssize_t vmcoreinfo_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        phys_addr_t vmcore_base = paddr_vmcoreinfo_note();

        return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
                          (unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);

#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        unsigned int sz = crash_get_elfcorehdr_size();

        return sysfs_emit(buf, "%u\n", sz);
}
KERNEL_ATTR_RO(crash_elfcorehdr_size);

#endif

#endif /* CONFIG_VMCORE_INFO */
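
/*
 * vmcoreinfo prints the physical address and size of the vmcoreinfo ELF
 * note, which dump tools such as kdump/makedumpfile use to interpret a
 * crash image. crash_elfcorehdr_size reports how much space is reserved
 * for the crash ELF headers, relevant when they are regenerated on CPU or
 * memory hotplug.
 */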

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);
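
/*
 * /sys/kernel/fscaps is 1 when file capabilities are honored and 0 when
 * they have been disabled, e.g. with the "no_file_caps" boot parameter.
 */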

#ifndef CONFIG_TINY_RCU
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        if (kstrtoint(buf, 0, &rcu_expedited))
                return -EINVAL;

        return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        if (kstrtoint(buf, 0, &rcu_normal))
                return -EINVAL;

        return count;
}
KERNEL_ATTR_RW(rcu_normal);
#endif /* #ifndef CONFIG_TINY_RCU */
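
/*
 * rcu_expedited and rcu_normal mirror the rcupdate.rcu_expedited and
 * rcupdate.rcu_normal boot parameters: a nonzero rcu_expedited makes
 * synchronize_rcu() and friends use expedited grace periods, while a
 * nonzero rcu_normal overrides that and forces normal grace periods.
 */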

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
extern const void __start_notes;
extern const void __stop_notes;
#define notes_size (&__stop_notes - &__start_notes)

static ssize_t notes_read(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buf, loff_t off, size_t count)
{
        memcpy(buf, &__start_notes + off, count);
        return count;
}

static struct bin_attribute notes_attr __ro_after_init = {
        .attr = {
                .name = "notes",
                .mode = S_IRUGO,
        },
        .read = &notes_read,
};
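
/*
 * notes_read() can copy without bounds checking because sysfs clamps
 * reads of a binary attribute to its ->size, which ksysfs_init() below
 * sets to the size of the .notes section. The section typically carries
 * ELF notes such as the kernel build ID.
 */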

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);
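
/*
 * kernel_kobj represents the /sys/kernel directory itself. It is exported
 * so that other subsystems can create entries beneath it (the "mm"
 * directory is one example). A minimal sketch of such a user, with
 * hypothetical names:
 *
 *	static struct kobject *example_kobj;
 *
 *	static int __init example_init(void)
 *	{
 *		example_kobj = kobject_create_and_add("example", kernel_kobj);
 *		if (!example_kobj)
 *			return -ENOMEM;
 *		return sysfs_create_group(example_kobj, &example_attr_group);
 *	}
 */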

static struct attribute *kernel_attrs[] = {
        &fscaps_attr.attr,
        &uevent_seqnum_attr.attr,
        &cpu_byteorder_attr.attr,
        &address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
        &uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
        &profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
        &kexec_loaded_attr.attr,
#ifdef CONFIG_CRASH_DUMP
        &kexec_crash_loaded_attr.attr,
        &kexec_crash_size_attr.attr,
#endif
#endif
#ifdef CONFIG_VMCORE_INFO
        &vmcoreinfo_attr.attr,
#ifdef CONFIG_CRASH_HOTPLUG
        &crash_elfcorehdr_size_attr.attr,
#endif
#endif
#ifndef CONFIG_TINY_RCU
        &rcu_expedited_attr.attr,
        &rcu_normal_attr.attr,
#endif
        NULL
};

static const struct attribute_group kernel_attr_group = {
        .attrs = kernel_attrs,
};

static int __init ksysfs_init(void)
{
        int error;

        kernel_kobj = kobject_create_and_add("kernel", NULL);
        if (!kernel_kobj) {
                error = -ENOMEM;
                goto exit;
        }
        error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
        if (error)
                goto kset_exit;

        if (notes_size > 0) {
                notes_attr.size = notes_size;
                error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
                if (error)
                        goto group_exit;
        }

        return 0;

group_exit:
        sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
        kobject_put(kernel_kobj);
exit:
        return error;
}

core_initcall(ksysfs_init);