/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"


/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);


/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Error-checks the return value and panics on failure, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes the arguments so that a zero length yields a
 *    NULL cpumask.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}

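/*
 * Flush and invalidate one page of cached data for the given home,
 * using the supplied kernel VA: locally if this cpu is the home,
 * otherwise with the remote (or hash-for-home) flush variant.
 */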
static void homecache_finv_page_va(void *va, int home)
{
	int cpu = get_cpu();
	if (home == cpu) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
	put_cpu();
}

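/*
 * Flush and invalidate a page by installing a temporary kernel
 * mapping with the requested home, finv'ing through that mapping,
 * and then tearing the mapping down again.
 */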
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}

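/*
 * Flush and invalidate a page via its permanent lowmem mapping when
 * the home already matches; otherwise fall back to a temporary mapping.
 */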
static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}

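/* Homing modes for which the data may be cached on any cpu, not at a single home. */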
static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

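/*
 * Flush and invalidate a page from wherever it may be cached.  For
 * incoherent homes we have to flush from every cacheable cpu;
 * otherwise one flush at (or via a mapping to) the home suffices.
 */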
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

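/* Flush and invalidate a page from its current home cache(s). */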
void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}

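/* Evict everything cached in the L2 caches of the cpus in "mask". */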
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

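/*
 * Report the home of a page: highmem pages are assumed hash-for-home,
 * lowmem pages are decoded from their kernel PTE.
 */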
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);

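/*
 * Statically change the home of a run of lowmem pages that are not
 * otherwise in use: flush/evict the old copies everywhere, then
 * rewrite the kernel PTEs with the new home.
 */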
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
EXPORT_SYMBOL(homecache_change_page_home);

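/* Allocate lowmem pages and immediately rehome them as requested. */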
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

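/* As homecache_alloc_pages(), but allocating from a specific NUMA node. */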
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

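/*
 * Free pages previously given a specific home, restoring them to
 * hash-for-home first so the allocator sees an ordinary page again.
 */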
void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);

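/* As __homecache_free_pages(), but taking a kernel virtual address. */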
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);