arch/arm64/Makefile, v4.17
#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux	:=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS		:=-9

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic \
			$(call ld-option, --no-apply-dynamic-relocs)
endif

ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(call ld-option, --fix-cortex-a53-843419),)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
  else
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif
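# An aside added for illustration (not in the upstream file): ld-option comes
# from scripts/Kbuild.include and expands to its first argument only when the
# linker accepts that flag, so the erratum workaround is applied just on
# binutils versions that implement it. A rough manual equivalent of the probe,
# assuming $(LD) is your cross linker:
#   $(LD) --fix-cortex-a53-843419 -v >/dev/null 2>&1 && echo supported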

KBUILD_DEFCONFIG := defconfig

# Check for binutils support for specific extensions
lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)

ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
  ifeq ($(lseinstr),)
$(warning LSE atomics not supported by binutils)
  endif
endif
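# An aside added for illustration (not in the upstream file): as-instr, also
# from scripts/Kbuild.include, feeds the given instruction to the assembler
# and yields its second argument on success and nothing on failure. A rough
# manual equivalent of the LSE probe:
#   echo ".arch_extension lse" | $(CC) -c -x assembler -o /dev/null -
# If that assembles, lseinstr expands to -DCONFIG_AS_LSE=1.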

ifeq ($(CONFIG_ARM64), y)
brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\tnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)

  ifneq ($(brokengasinst),)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
  endif
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst)
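# An aside added for illustration (not in the upstream file):
# -mgeneral-regs-only keeps the compiler from using FP/SIMD registers in
# kernel code. The kernel does not save or restore the FP/SIMD state on every
# kernel entry, so stray vector code generation would corrupt user state;
# kernel code that genuinely needs SIMD must bracket it with
# kernel_neon_begin()/kernel_neon_end().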
 

KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

ifeq ($(cc-name),clang)
KBUILD_CFLAGS	+= -DCONFIG_ARCH_SUPPORTS_INT128
else
KBUILD_CFLAGS	+= $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
AS		+= -EB
LD		+= -EB
LDFLAGS		+= -maarch64linuxb
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
AS		+= -EL
LD		+= -EL
LDFLAGS		+= -maarch64linux
UTS_MACHINE	:= aarch64
endif

CHECKFLAGS	+= -D__aarch64__ -m64

ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE	+= -T $(srctree)/arch/arm64/kernel/module.lds
endif

# Default value
head-y		:= arch/arm64/kernel/head.o

# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
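# Worked example, added for illustration (not in the upstream file), assuming
# 4K pages (CONFIG_ARM64_PAGE_SHIFT=12): the expression reduces to
#   int(2097152 / 4096 * rand()) * 4096
# i.e. a page-aligned offset drawn uniformly from [0x000000, 0x200000), so a
# randomized build might end up with, say, TEXT_OFFSET := 0x07b000. A roughly
# equivalent standalone command:
#   awk 'BEGIN { srand(); printf "0x%06x\n", int(2097152/4096*rand())*4096 }'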

# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#				 - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
	(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
	+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
	- (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
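# Worked example, added for illustration (not in the upstream file), assuming
# CONFIG_ARM64_VA_BITS=48: the high 32 bits evaluate to
#   (0xffffffff & (-1 << 16)) + (1 << 13) - (1 << 29)
#   = 0xffff0000 + 0x2000 - 0x20000000
#   = 0xdfff2000
# giving KASAN_SHADOW_OFFSET = 0xdfff200000000000. As a sanity check,
# (0xffff000000000000 >> 3) + 0xdfff200000000000 = 0xffff000000000000, i.e.
# the shadow of VA_START lands at VA_START, the bottom of the shadow region.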

export	TEXT_OFFSET GZFLAGS

core-y		+= arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y		:= arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
boot		:= arch/arm64/boot
KBUILD_IMAGE	:= $(boot)/Image.gz
KBUILD_DTBS	:= dtbs

all:	Image.gz $(KBUILD_DTBS)


Image: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

zinstall install:
	$(Q)$(MAKE) $(build)=$(boot) $@

%.dtb: scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@

PHONY += dtbs dtbs_install

dtbs: prepare scripts
	$(Q)$(MAKE) $(build)=$(boot)/dts

dtbs_install:
	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@

# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
	$(Q)$(MAKE) $(clean)=$(boot)/dts

# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '* dtbs          - Build device tree blobs for enabled boards'
  echo  '  dtbs_install  - Install dtbs to $(INSTALL_DTBS_PATH)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef
arch/arm64/Makefile, v6.2
#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux	:=--no-undefined -X

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext \
			$(call ld-option, --no-apply-dynamic-relocs)
endif

ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif

cc_has_k_constraint := $(call try-run,echo				\
	'int main(void) {						\
		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
		return 0;						\
	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
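# How to read the probe above (an aside, not in the upstream file): try-run
# takes (command, value-if-it-succeeds, value-if-it-fails), and the success
# slot here is empty, so -DCONFIG_CC_HAS_K_CONSTRAINT=1 is added only when the
# compile fails. 4294967295 (0xffffffff) cannot be encoded as a 32-bit logical
# immediate, so a compiler that validates the "K" constraint rejects it; a
# compiler that lets it through would eventually emit an instruction the
# assembler cannot encode, and the kernel then avoids relying on "K".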

ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif

KBUILD_CFLAGS	+= -mgeneral-regs-only	\
		   $(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
KBUILD_AFLAGS	+= $(compat_vdso)

KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

# Avoid generating .eh_frame* sections.
ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
else
KBUILD_CFLAGS	+= -fasynchronous-unwind-tables
KBUILD_AFLAGS	+= -fasynchronous-unwind-tables
endif

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg		  \
				-mstack-protector-guard-reg=sp_el0	  \
				-mstack-protector-guard-offset=$(shell	  \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif
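# What the awk above matches (an aside, not in the upstream file):
# include/generated/asm-offsets.h contains lines of the shape
#   #define TSK_STACK_CANARY 888 /* offsetof(struct task_struct, stack_canary) */
# (888 is purely illustrative). Field 3 is the byte offset, so the compiler is
# told to load the stack canary from that offset relative to sp_el0, which
# holds the current task pointer in the kernel, instead of from a global.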

ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.2-a
endif

# Ensure that if the compiler supports branch protection we default it to
# off; this will be overridden below if branch protection is in use.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)

ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
# performance impact has been observed.
PACRET-y := pac-ret+leaf

# Using a shadow call stack in leaf functions is too costly, so avoid PAC there
# as well when we may be patching PAC into SCS
PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret

ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti
else
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y)
endif
# -march=armv8.3-a enables the PAC instructions that live outside the NOP
# space. To keep the compiler from generating them (which would break the
# single-Image contract) we pass it only to the assembler. This is only
# needed for non-integrated assemblers.
ifeq ($(CONFIG_AS_HAS_PAC), y)
asm-arch := armv8.3-a
endif
endif

KBUILD_CFLAGS += $(branch-prot-flags-y)
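# How the flags above resolve (an aside, not in the upstream file): each ":="
# assignment overwrites branch-prot-flags-y, so only the strongest option the
# toolchain supports survives. With CONFIG_ARM64_PTR_AUTH_KERNEL=y,
# CONFIG_ARM64_BTI_KERNEL=y and CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y the
# net effect is simply
#   KBUILD_CFLAGS += -mbranch-protection=pac-ret+leaf+bti
# (or pac-ret+bti when CONFIG_UNWIND_PATCH_PAC_INTO_SCS drops "+leaf").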

ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.4-a
endif

ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
# make sure to pass the newest target architecture to -march.
asm-arch := armv8.5-a
endif

ifdef asm-arch
KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
		   -DARM64_ASM_ARCH='"$(asm-arch)"'
endif
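# An aside added for illustration (not in the upstream file): -Wa,-march only
# raises the architecture level the assembler will accept, so .S files and
# inline asm may contain e.g. ARMv8.3 PAC or ARMv8.5 instructions behind
# runtime CPU feature checks while compiler-generated code still targets the
# baseline, keeping a single Image bootable on plain ARMv8.0 hardware.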

ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS	+= -ffixed-x18
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS	+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS	+= -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE	:= aarch64
endif

ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS	+= -z norelro
endif

CHECKFLAGS	+= -D__aarch64__

ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
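# An aside added for illustration (not in the upstream file):
# -fpatchable-function-entry=2 makes the compiler emit two NOPs at the entry
# of every traceable function; at runtime ftrace patches them into a branch to
# its trampoline, which is how DYNAMIC_FTRACE_WITH_ARGS works without the
# older -pg/mcount instrumentation.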

ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
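# An aside added for illustration (not in the upstream file): the scale shift
# is the log2 of the shadow granule, so one shadow byte covers 8 bytes of
# memory for generic KASAN (shift 3) and 16 bytes for software tag-based KASAN
# (shift 4), the same granule used by hardware memory tagging.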

libs-y		:= arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
boot		:= arch/arm64/boot

ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE	:= $(boot)/Image.gz
else
KBUILD_IMAGE	:= $(boot)/vmlinuz.efi
endif

all:	$(notdir $(KBUILD_IMAGE))


Image vmlinuz.efi: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

install: KBUILD_IMAGE := $(boot)/Image
install zinstall:
	$(call cmd,install)

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
	$(if $(CONFIG_COMPAT_VDSO), \
		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)

archprepare:
	$(Q)$(MAKE) $(build)=arch/arm64/tools kapi
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
	@echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
  endif
endif
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
  ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
	@echo "warning: LSE atomics not supported by binutils" >&2
  endif
endif

ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso \
	include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
ifdef CONFIG_COMPAT_VDSO
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
	include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
endif
endif

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef