v6.13.7
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    is_nommu_shared_mapping(vma->vm_flags)) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	mmap_read_unlock(mm);

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);
}

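/*
 * Sum the size of every VMA in the mm to give the total amount of
 * virtual address space in use, in bytes.
 */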
unsigned long task_vsize(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		vsize += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);
	return vsize;
}

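/*
 * Report memory usage for /proc/<pid>/statm: the kernel overhead of
 * the mm, VMA and region objects plus the mapped regions themselves,
 * expressed in pages, with the text and data sizes derived from the
 * mm's code/data/stack markers.
 */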
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_path(m, file_user_path(file), "");
	} else if (mm && vma_is_initial_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	return nommu_vma_show(m, _p);
}

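/*
 * Fetch the next VMA from the iterator and record its start address in
 * *ppos so a later read can resume from the same position; -1UL marks
 * the end of the walk.
 */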
static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
						loff_t *ppos)
{
	struct vm_area_struct *vma = vma_next(&priv->iter);

	if (vma) {
		*ppos = vma->vm_start;
	} else {
		*ppos = -1UL;
	}

	return vma;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;

	/* See proc_get_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	vma_iter_init(&priv->iter, mm, last_addr);

	return proc_get_vma(priv, ppos);
}

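/*
 * Drop the mmap lock and the mm and task references taken in m_start(),
 * if m_start() succeeded.
 */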
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

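/* Advance to the next VMA; the iterator state lives in the private data. */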
static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
{
	return proc_get_vma(m->private, ppos);
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

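/*
 * Open a /proc/<pid>/maps file: allocate the per-open private data and
 * take a reference on the target mm, subject to a ptrace-mode access
 * check.
 */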
static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

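/* Release the reference on the mm and free the seq_file private data. */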
static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

v5.14.15
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	mmap_read_unlock(mm);
}

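/*
 * Walk the VMA rbtree and sum the size of each mapping to give the
 * total virtual address space in use, in bytes.
 */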
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	mmap_read_unlock(mm);
	return vsize;
}

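/*
 * Report memory usage for /proc/<pid>/statm: the kernel overhead of
 * the mm, VMA and region objects plus the mapped regions themselves,
 * expressed in pages, with the text and data sizes derived from the
 * mm's code/data/stack markers.
 */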
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		return ERR_PTR(-EINTR);
	}

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	mmap_read_unlock(mm);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		mmap_read_unlock(priv->mm);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

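/* Step to the next VMA in the rbtree, bumping the seq_file position. */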
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};