/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
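/*
 * Overview: get_user_pages_fast() pins user pages without taking
 * mmap_sem or any page-table lock.  It walks the page tables with
 * interrupts disabled, takes speculative references on the pages it
 * finds, and falls back to the regular get_user_pages() slow path for
 * anything it cannot handle locklessly.
 */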
#undef DEBUG

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL

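/*
 * The lockless walk depends on pte_special(): without the VMA in hand
 * we cannot otherwise tell whether a pfn is backed by a normal,
 * refcountable struct page, so this code is only built when the
 * architecture marks special mappings in the pte itself.
 */
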
/*
 * The performance-critical leaf functions are made noinline, otherwise
 * gcc inlines everything into a single function, which results in too
 * much register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	result = _PAGE_PRESENT|_PAGE_USER;
	if (write)
		result |= _PAGE_RW;
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
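		/*
		 * Snapshot the pte once: all tests below use this local
		 * copy, and it is compared against the live entry again
		 * after we take a reference on the page.
		 */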
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;
		/*
		 * Similar to the PMD case, NUMA hinting must take the
		 * slow path.
		 */
		if (pte_numa(pte))
			return 0;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
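		/*
		 * Pin the page with a speculative reference; the raised
		 * refcount keeps it from being freed and reused while we
		 * confirm the pte did not change under us.  If it did,
		 * drop the reference and let the slow path sort it out.
		 */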
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		/*
		 * If we find a splitting transparent hugepage we return
		 * zero.  That will result in taking the slow path, which
		 * will call wait_split_huge_page() if the pmd is still in
		 * splitting state.
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (pmd_huge(pmd) || pmd_large(pmd)) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
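			/*
			 * powerpc can also map huge pages via hugepage
			 * directories (hugepd) that can hang off any level
			 * of the page table; those are walked by the
			 * dedicated gup_hugepd() helper.
			 */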
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = ACCESS_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (pud_huge(pud)) {
			if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pudp)) {
			if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	pr_devel("  aligned: %lx .. %lx\n", start, end);

	/*
	 * XXX: batch / limit 'nr' to avoid large irq-off latency; this
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (e.g. DB2), and whether limiting the batch
	 * size will decrease performance.
	 *
	 * It seems like we're in the clear for the moment.  Direct-IO is
	 * the main user that batches up lots of get_user_pages, and even
	 * that is limited to 64 pages at a time, which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on powerpc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
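	/*
	 * powerpc frees page-table pages via an RCU-sched style batch,
	 * and a CPU running with interrupts disabled cannot pass through
	 * a quiescent state, so any table reachable below stays allocated
	 * until interrupts are re-enabled (its entries may still change,
	 * hence the re-checks above).
	 */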
	local_irq_save(flags);

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = ACCESS_ONCE(*pgdp);

		pr_devel("  %016lx: normal pgd %p\n", addr,
			 (void *)pgd_val(pgd));
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (pgd_huge(pgd)) {
			if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
					 write, pages, &nr))
				break;
		} else if (is_hugepd(pgdp)) {
			if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
					addr, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);

	local_irq_restore(flags);

	return nr;
}

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		pr_devel("  slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/*
		 * Have to be a bit careful with return values: if the
		 * fast path pinned some pages but the slow path then
		 * failed, report the pages we did pin rather than the
		 * error.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

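/*
 * Typical caller pattern (an illustrative sketch, not part of this
 * file; 'uaddr' is a hypothetical user address):
 *
 *	struct page *pages[64];
 *	int i, n;
 *
 *	n = get_user_pages_fast(uaddr, 64, 1, pages);
 *	if (n > 0) {
 *		... access the pinned pages, e.g. for direct I/O ...
 *		for (i = 0; i < n; i++)
 *			put_page(pages[i]);
 *	}
 */
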
#endif /* __HAVE_ARCH_PTE_SPECIAL */