v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

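	/*
	 * Round the start address down to a cache-line boundary so that
	 * every line overlapping [addr, addr + size) gets written back.
	 */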
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

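	/* stac()/clac() open and close the user-access window (SMAP). */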
	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
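		/*
		 * The unaligned head was written with cached stores: flush
		 * its cache line and treat everything up to the next
		 * cache-line boundary as already flushed.
		 */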
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

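		/*
		 * A tail that is not a multiple of 8 bytes was also written
		 * with cached stores: flush the line holding the last byte.
		 */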
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

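	/*
	 * movnti is a non-temporal store that bypasses the cache, so the
	 * aligned bulk of the copy needs no separate write-back.
	 */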
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
#endif
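For context, a minimal sketch (not from the kernel tree) of how a caller might use memcpy_flushcache(), the generic wrapper that dispatches to __memcpy_flushcache() above when CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE is set. write_record(), its parameters, and the destination mapping are hypothetical, and the header choices are indicative; real users such as the pmem block driver add error handling and stricter ordering.

#include <linux/string.h>	/* memcpy_flushcache() */
#include <asm/barrier.h>	/* pmem_wmb() */

/* Hypothetical helper: copy one record into a persistent-memory mapping. */
static void write_record(void *pmem_dst, const void *src, size_t len)
{
	/*
	 * The data is stored with non-temporal moves (plus CLWB for the
	 * unaligned edges), so it is not left dirty in the CPU cache.
	 */
	memcpy_flushcache(pmem_dst, src, len);

	/* Order the stores before the write is treated as persistent. */
	pmem_wmb();
}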
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

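/*
 * Map the page with kmap_atomic() and reuse memcpy_flushcache() so the
 * copied bytes are not left dirty in the CPU cache.
 */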
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif