v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <asm/pgtable.h>

/*
 * Hash based on the pmd of addr if configured with MMU, which provides a good
 * hit rate for workloads with spatial locality.  Otherwise, use pages.
 */
#ifdef CONFIG_MMU
#define VMACACHE_SHIFT	PMD_SHIFT
#else
#define VMACACHE_SHIFT	PAGE_SHIFT
#endif
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
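/*
 * Annotation (not in the original source): a worked example of the hash,
 * assuming x86-64 with 2 MiB PMDs (VMACACHE_SHIFT == 21) and the usual
 * VMACACHE_SIZE of 4 (so VMACACHE_MASK == 3):
 *
 *	VMACACHE_HASH(0x00400000) == (0x00400000 >> 21) & 3 == 2
 *	VMACACHE_HASH(0x005fffff) == 2	(same 2 MiB region -> same slot)
 *	VMACACHE_HASH(0x00600000) == 3	(next 2 MiB region -> next slot)
 */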

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
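
/*
 * Annotation (not in the original source): vmacache_flush(), called above,
 * is a static inline in include/linux/vmacache.h; in v5.4 it is roughly:
 *
 *	static inline void vmacache_flush(struct task_struct *tsk)
 *	{
 *		memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 *	}
 */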

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int idx = VMACACHE_HASH(addr);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma) {
#ifdef CONFIG_DEBUG_VM_VMACACHE
			if (WARN_ON_ONCE(vma->vm_mm != mm))
				break;
#endif
			if (vma->vm_start <= addr && vma->vm_end > addr) {
				count_vm_vmacache_event(VMACACHE_FIND_HITS);
				return vma;
			}
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int idx = VMACACHE_HASH(start);
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[idx];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
		if (++idx == VMACACHE_SIZE)
			idx = 0;
	}

	return NULL;
}
#endif
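
/*
 * Annotation (not in the original source): a sketch of how these helpers
 * are consumed. In v5.4, find_vma() in mm/mmap.c tries the per-thread
 * cache first and refills it after a successful rbtree walk, roughly:
 *
 *	struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma = vmacache_find(mm, addr);	// fast path: cache hit
 *		if (likely(vma))
 *			return vma;
 *
 *		// slow path (elided): walk mm->mm_rb for the first vma
 *		// with vm_end > addr, then cache it for the next lookup:
 *		if (vma)
 *			vmacache_update(addr, vma);
 *		return vma;
 *	}
 */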
v4.6
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can avoid flushing as well,
	 * since the mm's seqnum was increased and we don't
	 * have to worry about other threads' seqnums. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
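
/*
 * Annotation (not in the original source): the path that reaches
 * vmacache_flush_all(). In v4.6, vmacache_invalidate() in
 * include/linux/vmacache.h bumps the mm's sequence number and falls back
 * to a full flush only when the counter wraps, roughly:
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		// deal with overflows
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 */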

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (ie, its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}
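
/*
 * Annotation (not in the original source): unlike v5.4, this file does not
 * define VMACACHE_HASH() itself; in v4.6 it lives in include/linux/vmacache.h
 * as a page-granular hash, roughly:
 *
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 */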

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
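
/*
 * Annotation (not in the original source): in v4.6 the cache sits directly
 * in task_struct as bare fields (in v5.4 they are wrapped in a struct
 * vmacache member, hence the curr->vmacache.seqnum spelling there), roughly:
 *
 *	u32 vmacache_seqnum;
 *	struct vm_area_struct *vmacache[VMACACHE_SIZE];
 */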

struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif