mm/vmacache.c (v3.15)
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr)
			return vma;
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end)
			return vma;
	}

	return NULL;
}
#endif
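
The file leans on a few helpers and fields defined elsewhere. The sketch below shows roughly how they look in include/linux/vmacache.h and <linux/sched.h> for these kernel versions; treat it as an illustration of the data structures, not a verbatim copy. The key points: each task_struct carries a small array of VMA pointers plus a sequence number, VMACACHE_HASH() picks a slot from the faulting address, vmacache_flush() clears a task's slots, and vmacache_invalidate() bumps the per-mm sequence number on every VMA change, falling back to vmacache_flush_all() only when that counter wraps to zero (which is why the seqnum comparison in vmacache_valid() is enough to catch stale caches).

/* Sketch of the supporting definitions (approximate, for illustration only). */

/* <linux/sched.h>: sizing of the per-task cache and the task_struct fields. */
#define VMACACHE_BITS	2
#define VMACACHE_SIZE	(1U << VMACACHE_BITS)
#define VMACACHE_MASK	(VMACACHE_SIZE - 1)

struct task_struct {
	/* ... */
	u32 vmacache_seqnum;		/* last mm->vmacache_seqnum this task saw */
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
	/* ... */
};

/* <linux/vmacache.h>: slot selection, per-task flush, per-mm invalidation. */
#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)

static inline void vmacache_flush(struct task_struct *tsk)
{
	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}

static inline void vmacache_invalidate(struct mm_struct *mm)
{
	mm->vmacache_seqnum++;

	/* Only the rare seqnum overflow needs the full per-thread flush. */
	if (unlikely(mm->vmacache_seqnum == 0))
		vmacache_flush_all(mm);
}
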
mm/vmacache.c (v4.10.11)
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of process. We can avoid the flushing as well
	 * since the mm's seqnum was increased and don't have
	 * to worry about other threads' seqnum. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif
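
For context, the cache sits on the find_vma() fast path. The fragment below is a hedged sketch, not verbatim kernel code: slow_rbtree_lookup() is a hypothetical stand-in for the mm->mm_rb rbtree walk that mm/mmap.c performs in these versions. It shows how a caller combines vmacache_find() and vmacache_update(): try the few cached slots first, fall back to the rbtree, then record the result so the next fault on the same VMA hits the cache. Relative to v3.15, the v4.10.11 version above differs mainly in bookkeeping: it counts full flushes, find calls and hits via count_vm_vmacache_event(), and vmacache_flush_all() returns early for a single-user mm, since only current's cache could be stale and it will be refreshed on the next lookup anyway.

/*
 * Sketch of the caller-side pattern (illustrative only): roughly what
 * find_vma() in mm/mmap.c does in these kernel versions.
 */
struct vm_area_struct *find_vma_sketch(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* Fast path: the per-task slots checked by vmacache_find(). */
	vma = vmacache_find(mm, addr);
	if (vma)
		return vma;

	/* Slow path: hypothetical stand-in for the per-mm rbtree walk. */
	vma = slow_rbtree_lookup(mm, addr);
	if (vma)
		vmacache_update(addr, vma);	/* remember it for the next lookup */

	return vma;
}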