/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>
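
/*
 * The fast path walks the page tables top-down (pgd -> pud -> pmd -> pte)
 * with interrupts disabled and without taking mmap_sem. Every table entry
 * is loaded just once, page references are taken speculatively and then
 * re-verified against the entry; on any doubt the walk bails out so that
 * the caller can fall back to the regular, lock-taking slow path.
 */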

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	pte_t *ptep, pte;

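	/*
	 * Bits that must be clear in each pte: _PAGE_INVALID and
	 * _PAGE_SPECIAL always, plus _PAGE_PROTECT when write access
	 * is required.
	 */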
	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
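		/*
		 * Load the pte once; barrier() keeps the compiler from
		 * reloading it, so all checks below see one snapshot.
		 */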
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);
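		/*
		 * Take a speculative reference on the compound head, then
		 * re-read the pte: if it changed in the meantime, drop the
		 * reference and defer to the slow path.
		 */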
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	struct page *head, *page;
	int refs;

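	/*
	 * The segment entry must be valid; a write access additionally
	 * requires that it is not write protected.
	 */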
	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

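	/*
	 * Record all subpages first, then take a single batched
	 * reference on the compound head.
	 */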
	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

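	/*
	 * Re-read the pmd: if it changed (e.g. due to THP splitting or
	 * migration) while the references were taken, undo everything.
	 */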
	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

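	/*
	 * Page table levels can be folded on s390: only if the pud is a
	 * region-third-table entry does it point to a separate segment
	 * table; otherwise the entry itself is used as the pmd.
	 */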
	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

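	/* Same folding logic one level up, for region-second-table entries. */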
	pudp = (pud_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
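	/*
	 * Reject empty or wrapping ranges and anything extending beyond
	 * the user address space.
	 */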
	if ((end <= start) || (end > TASK_SIZE))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

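	/* The slow-path fallback may sleep; warn early if called atomically. */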
	might_sleep();
	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
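
/*
 * Illustrative usage sketch (not part of the original file): pin a single
 * user page, use it, and release the reference with put_page(). The
 * function name example_pin_one_page is hypothetical.
 */
static int example_pin_one_page(unsigned long uaddr, int write)
{
	struct page *page;
	int ret;

	/* Pins exactly one page; may fall back to the locked slow path. */
	ret = get_user_pages_fast(uaddr, 1, write, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the page here, e.g. via kmap(page) ... */

	put_page(page);
	return 0;
}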