v6.13.7
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    is_nommu_shared_mapping(vma->vm_flags)) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	mmap_read_unlock(mm);

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);
}
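/*
 * For reference: task_mem() feeds the extra "Mem:", "Slack:" and
 * "Shared:" lines seen in /proc/<pid>/status on !MMU kernels (the
 * caller lives in fs/proc/array.c). With hypothetical values the
 * output would look like:
 *
 *	Mem:	  143360 bytes
 *	Slack:	    4096 bytes
 *	Shared:	   65536 bytes
 */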

unsigned long task_vsize(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		vsize += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}
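/*
 * Note on units: "size" accumulates bytes (kobjsize() results plus
 * region spans) and only becomes a page count at the final shift,
 * whereas *text and *data are computed in pages from the start; the
 * combined figure is returned as both the total and *resident, which
 * is what /proc/<pid>/statm reports.
 */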

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_path(m, file_user_path(file), "");
	} else if (mm && vma_is_initial_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}
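/*
 * A line emitted by the format string above might read (hypothetical
 * values): "00010000-00020000 rw-s 00000000 00:0b 1234 /dev/shm/x".
 * The fourth flag character is 'S' for VM_SHARED mappings, 's' for
 * mappings that are merely VM_MAYSHARE, and 'p' for private ones.
 */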

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	return nommu_vma_show(m, _p);
}

static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
						loff_t *ppos)
{
	struct vm_area_struct *vma = vma_next(&priv->iter);

	if (vma) {
		*ppos = vma->vm_start;
	} else {
		*ppos = -1UL;
	}

	return vma;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;

	/* See proc_get_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	vma_iter_init(&priv->iter, mm, last_addr);

	return proc_get_vma(priv, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
{
	return proc_get_vma(m->private, ppos);
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
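/*
 * The seq_file core drives these hooks as start() -> show() ->
 * next() -> show() ... -> stop(), re-entering at start() for each
 * read() chunk. Since *ppos holds a VMA start address rather than an
 * index, a re-entry resumes the maple-tree iterator at the last
 * position instead of rescanning from the beginning.
 */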

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}


static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

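The listing above is the kernel side of /proc/<pid>/maps. As a minimal userspace sketch (names and error handling are illustrative, not part of the source), each line read here is one produced by nommu_vma_show() via the seq_file layer:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* one fgets() per mapping line, formatted by the kernel */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
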
v4.10.11
 

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(priv, vma)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}


static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

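The main difference between the two listings is how VMAs are walked and how the address space is locked. A minimal side-by-side sketch, with both fragments taken from the functions above:

	/* v6.13.7: maple-tree iterator, mmap_read_lock() */
	VMA_ITERATOR(vmi, mm, 0);
	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		/* ... */;
	mmap_read_unlock(mm);

	/* v4.10.11: rb-tree walk, mmap_sem */
	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		/* ... */
	}
	up_read(&mm->mmap_sem);

The newer version also drops the separate tid variants (show_tid_map, proc_tid_maps_operations), tests shared mappings via is_nommu_shared_mapping() instead of VM_MAYSHARE directly, and keys the seq_file position on the VMA start address rather than an index, so m_start() no longer re-walks the tree from the beginning on every resume.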