v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>


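/*
 * Translate a mapped kernel virtual address to a physical address by
 * asking the MMU directly (PLPAR on the '060, PTESTR on the '040).
 * Unlike the linear-map virt_to_phys(), this also handles addresses
 * outside the physical identity mapping (e.g. vmalloc space); the
 * physical address is needed because CPUSHP below operates on physical
 * pages.  Returns 0 if no valid translation exists.
 */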
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		WARN_ON_ONCE(!CPU_IS_040_OR_060);
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
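		/* If the masked set index range wraps around the end of the
		 * cache, flush it in two pieces: [0, end] and
		 * [start, ICACHE_MAX_ADDR]. */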
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

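		/* CPUSHP %bc pushes dirty data-cache lines and invalidates
		 * matching lines in both caches for one physical page, so
		 * translate each virtual page to its physical address first. */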
		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
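		/* 68020/68030: invalidate the whole instruction cache by
		 * setting the clear-instruction-cache bit (FLUSH_I) in CACR. */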
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	set_fc(SUPER_DATA);
	flush_icache_user_range(address, endaddr);
	set_fc(USER_DATA);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);

	} else if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

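Below is a minimal caller sketch, not part of the file above; the helper name example_patch_kernel_text and its arguments are illustrative. flush_icache_range() is the hook generic code (for example the module loader) calls after storing instructions into kernel memory, so that stale lines are invalidated before the new code is executed.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative only: copy new instructions into place, then flush the
 * instruction cache over that range so the CPU refetches them. */
static void example_patch_kernel_text(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
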
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>


static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

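		/* 68020/68030: PTESTR walks the translation tables for vaddr
		 * and PMOVE saves the MMU status register; the descriptor
		 * address it returns is physical.  MMU_NUM gives the number of
		 * table levels used, which tells whether the mapping is an
		 * early-termination descriptor (the masks below correspond to
		 * 32 MB at level 1 and 256 KB at level 2) or a page descriptor. */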
		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1"
			      : "=a&" (descaddr), "=m" (mmusr)
			      : "a" (vaddr), "d" (get_fs().seg));
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);

	} else if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}

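For context, a minimal sketch of the kind of caller that ends up in flush_icache_user_page() (named flush_icache_user_range() in v5.4): when the kernel writes instructions into a user page on another task's behalf, such as ptrace planting a breakpoint, generic code goes through copy_to_user_page(), which on m68k roughly flushes the data cache for the page, copies the bytes, and then invokes this hook. The function and variable names below are illustrative, not from the kernel tree.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Illustrative only: write a replacement instruction into a user page
 * through its kernel mapping and make the change visible to instruction
 * fetch via copy_to_user_page(). */
static void example_poke_user_text(struct vm_area_struct *vma, struct page *page,
				   unsigned long uaddr, const void *insn, int len)
{
	void *kaddr = kmap(page);

	copy_to_user_page(vma, page, uaddr,
			  kaddr + offset_in_page(uaddr), insn, len);
	kunmap(page);
}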