/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"


#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.  There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif
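
/*
 * Example (illustrative, not part of the original source): to suppress
 * local L2 caching of remotely-homed data, boot with
 *
 *	noallocl2
 *
 * on the kernel command line; set_noallocl2() then runs during
 * early_param() parsing, before the homecache is used.
 */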

/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)


/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
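
/*
 * Worked example (illustrative): with an 8x8 grid of tiles
 * (smp_width == 8), an HV_Remote_ASID for the tile at x = 3, y = 2
 * maps to cpu 2 * 8 + 3 == 19, so cpu 19 is added to the mask above.
 */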

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes the arguments so that a length of zero yields a
 *    NULL cpumask.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}
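
/*
 * Example (illustrative sketch, not from the original source):
 * homecache_evict() below is the simplest caller; a TLB-only flush of
 * one page at a hypothetical kernel address "va" on all online cpus
 * would look like
 *
 *	flush_remote(0, 0, NULL,
 *		     (HV_VirtAddr)va, PAGE_SIZE, PAGE_SIZE,
 *		     cpu_online_mask, NULL, 0);
 */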

void flush_remote_page(struct page *page, int order)
{
	int i, pages = (1 << order);
	for (i = 0; i < pages; ++i, ++page) {
		void *p = kmap_atomic(page);
		int hfh = 0;
		int home = page_home(page);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH)
			hfh = 1;
		else
#endif
			BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(p, PAGE_SIZE, hfh);
		kunmap_atomic(p);
	}
}

void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/*
 * Return a mask of the cpus whose caches currently own these pages.
 * The return value is whether the pages are all coherently cached
 * (i.e. none are immutable, incoherent, or uncached).
 */
static int homecache_mask(struct page *page, int pages,
			  struct cpumask *home_mask)
{
	int i;
	int cached_coherently = 1;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return 0;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED) {
			cached_coherently = 0;
			continue;
		}
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
	return cached_coherently;
}
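
/*
 * Example (illustrative): for a two-page buffer with one page homed
 * on cpu 5 and the other hash-for-home, home_mask ends up holding
 * cpu 5 plus hash_for_home_map, and the return value is 1 (all
 * pages coherently cached).
 */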

/*
 * Return the passed length, or HV_FLUSH_EVICT_L2 if it's long enough
 * that we believe we should just evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}
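
/*
 * Worked example (illustrative): if CHIP_L2_CACHE_SIZE() were 256KB,
 * a request to flush a 1MB buffer would be converted into a whole-L2
 * eviction (HV_FLUSH_EVICT_L2), while a 4KB request stays 4KB.
 */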

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}


/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
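
/*
 * Example (illustrative): to request that a kernel PTE be homed on,
 * say, cpu 4's L2 cache, a caller might write
 *
 *	pte = pte_set_home(pte, 4);
 *
 * or pass PAGE_HOME_HASH to spread the page across the hash-for-home
 * tiles, on chips that support that mode.
 */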

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}
EXPORT_SYMBOL(page_home);

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}

struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);
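
/*
 * Example (illustrative sketch): allocating a single page homed on
 * the current cpu and releasing it again might look like
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0,
 *						  smp_processor_id());
 *	if (page)
 *		homecache_free_pages((unsigned long)page_address(page), 0);
 *
 * homecache_free_pages() (below) resets the home before freeing.
 */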

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, initial_page_home());
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}