fs/proc/task_nommu.c (v4.17)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
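
On !CONFIG_MMU kernels these three counters surface as the Mem:, Slack: and Shared: lines of /proc/<pid>/status. A minimal userspace sketch that picks them out (my own illustration; it assumes the nommu format above and prints nothing on an MMU kernel, where these lines are absent):

#include <stdio.h>
#include <string.h>

/* Print only the three lines task_mem() contributes to /proc/self/status. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "Mem:", 4) ||
		    !strncmp(line, "Slack:", 6) ||
		    !strncmp(line, "Shared:", 7))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}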

unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}
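
task_statm() backs /proc/<pid>/statm, which exposes seven page-count fields (size, resident, shared, text, lib, data, dt). A hedged userspace sketch reading them; note that this nommu version reports resident equal to size and never writes *shared:

#include <stdio.h>

/* Read the seven statm fields (units of PAGE_SIZE); task_statm() above
 * supplies size, resident, text and data. */
int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) == 7)
		printf("size=%lu resident=%lu text=%lu data=%lu pages\n",
		       size, resident, text, data);
	fclose(f);
	return 0;
}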

static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}
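
The seq_printf() format above is the usual maps layout: start-end addresses, rwxp permission characters, file offset, device major:minor, inode, then an optional pathname or [stack] tag. A hypothetical parser for the fixed-width prefix (the sample line and field names are my own):

#include <stdio.h>

/* Parse the fixed fields of one maps line produced by nommu_vma_show(). */
int main(void)
{
	unsigned long start, end, ino;
	unsigned long long pgoff;
	unsigned int major, minor;
	char perms[5];
	const char *line = "c0010000-c0020000 r-xp 00000000 00:0b 1234 /bin/busybox";

	if (sscanf(line, "%lx-%lx %4s %llx %x:%x %lu",
		   &start, &end, perms, &pgoff, &major, &minor, &ino) == 7)
		printf("len=%lu perms=%s inode=%lu\n", end - start, perms, ino);
	return 0;
}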

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};
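
The seq_file core calls these hooks as start(pos), then alternating show()/next() until next() returns NULL, then stop(); because a large read is delivered in chunks, start() may be re-entered with a non-zero *pos, which is why m_start() walks to the Nth VMA each time. A rough userspace model of that driving loop (my own sketch over an array, not kernel code):

#include <stdio.h>

static int items[] = { 10, 20, 30 };
#define NITEMS ((long)(sizeof(items) / sizeof(items[0])))

static void *m_start(long *pos)
{
	/* in the kernel this also takes locks/refs, released in m_stop() */
	return *pos < NITEMS ? &items[*pos] : NULL;
}

static void *m_next(void *p, long *pos)
{
	(void)p;	/* the kernel advances from p; here *pos suffices */
	++*pos;
	return *pos < NITEMS ? &items[*pos] : NULL;
}

static void m_stop(void *p)
{
	(void)p;	/* drop whatever m_start() acquired */
}

static int m_show(void *p)
{
	printf("%d\n", *(int *)p);
	return 0;
}

int main(void)
{
	long pos = 0;
	void *p = m_start(&pos);

	while (p) {
		if (m_show(p))
			break;
		p = m_next(p, &pos);
	}
	m_stop(p);
	return 0;
}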

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};
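
For completeness, a small consumer: since maps_open() goes through proc_mem_open(inode, PTRACE_MODE_READ), opening another task's maps is subject to the ptrace access-mode check, so a reader should expect EACCES or EPERM (hypothetical example, not from the kernel tree):

#include <stdio.h>
#include <errno.h>
#include <string.h>

/* Dump /proc/<pid>/maps; pass a PID as argv[1], default "self". */
int main(int argc, char **argv)
{
	char path[64], line[512];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/maps",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}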

fs/proc/task_nommu.c (v3.15)
 
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "");
	} else if (mm) {
		pid_t tid = vm_is_stack(priv->task, vma, is_pid);

		if (tid != 0) {
			seq_pad(m, ' ');
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, "[stack]");
			else
				seq_printf(m, "[stack:%d]", tid);
		}
	}

	seq_putc(m, '\n');
	return 0;
}
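
Unlike the v4.17 code above, this version can emit [stack:TID] for a thread stack in /proc/PID/maps. A tiny sketch distinguishing the two spellings (helper name is my own):

#include <stdio.h>
#include <string.h>

/* Classify the pathname column of a v3.15-era maps line. */
static void classify(const char *name)
{
	int tid;

	if (!strcmp(name, "[stack]"))
		printf("main process stack\n");
	else if (sscanf(name, "[stack:%d]", &tid) == 1)
		printf("stack of thread %d\n", tid);
	else
		printf("other mapping: %s\n", name);
}

int main(void)
{
	classify("[stack]");
	classify("[stack:1234]");
	classify("/bin/busybox");
	return 0;
}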

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return mm;
	}
	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};