author     Linus Walleij <linus.walleij@linaro.org>             2024-04-23 08:29:31 +0100
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>   2024-04-29 14:14:16 +0100
commit     1036b89580dc611cfb5dfe66af6b35452dfb272c (patch)
tree       8af0e6080d0557da7d6bfb095f23022debf50052
parent     6b0ef2792c223636a86f2c9c3fcb26502a03d5a7 (diff)
ARM: 9385/2: mm: Type-annotate all cache assembly routines
Tag all references to assembly functions with SYM_TYPED_FUNC_START() and
SYM_FUNC_END() so they also become CFI-safe.

When we add SYM_TYPED_FUNC_START() to assembly calls, a function prototype
signature will be emitted into the object file at (pc-4) at the call site,
so that the KCFI runtime check can compare this to the expected call.

Example:

  8011ae38:  a540670c  .word  0xa540670c

  8011ae3c <v7_flush_icache_all>:
  8011ae3c:  e3a00000  mov    r0, #0
  8011ae40:  ee070f11  mcr    15, 0, r0, cr7, cr1, {0}
  8011ae44:  e12fff1e  bx     lr

This means no "fallthrough" code can enter a SYM_TYPED_FUNC_START() call
from above it: there will be a function prototype signature there, so those
are consistently converted to a branch or ret lr depending on context.

Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
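For readers unfamiliar with KCFI, the following minimal C sketch shows conceptually
what the runtime check around such an indirect call does. The struct name, helper
function, and constant below are illustrative assumptions only (the hash value is
simply reused from the disassembly example above); they are not the kernel's real
cpu_cache_fns definition nor the actual compiler-emitted check sequence.

/*
 * Illustrative sketch only -- not the real compiler-generated KCFI check
 * and not the kernel's actual cpu_cache_fns layout.
 */
#include <stdint.h>

struct cpu_cache_fns_sketch {
	void (*flush_icache_all)(void);   /* e.g. v7_flush_icache_all */
};

/* Type hash for "void (*)(void)"; value borrowed from the example above. */
#define KCFI_TYPEID_FLUSH_ICACHE 0xa540670cu

static void flush_icache_via(const struct cpu_cache_fns_sketch *fns)
{
	void (*fn)(void) = fns->flush_icache_all;
	const uint32_t *entry = (const uint32_t *)(uintptr_t)fn;

	/*
	 * SYM_TYPED_FUNC_START() placed the type hash in the word just before
	 * the function's first instruction, i.e. at (pc - 4) of the entry point.
	 */
	if (entry[-1] != KCFI_TYPEID_FLUSH_ICACHE)
		__builtin_trap();         /* CFI violation: prototype mismatch */

	fn();                             /* hash matched: indirect call proceeds */
}

Because that hash word sits immediately before every typed entry point, code can no
longer fall through into one of these functions from the instructions above it,
which is why the patch replaces the old fallthroughs with explicit branches.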
-rw-r--r--  arch/arm/mm/cache-fa.S        | 39
-rw-r--r--  arch/arm/mm/cache-nop.S       | 51
-rw-r--r--  arch/arm/mm/cache-v4.S        | 47
-rw-r--r--  arch/arm/mm/cache-v4wb.S      | 39
-rw-r--r--  arch/arm/mm/cache-v4wt.S      | 47
-rw-r--r--  arch/arm/mm/cache-v6.S        | 41
-rw-r--r--  arch/arm/mm/cache-v7.S        | 49
-rw-r--r--  arch/arm/mm/cache-v7m.S       | 45
-rw-r--r--  arch/arm/mm/proc-arm1020.S    | 39
-rw-r--r--  arch/arm/mm/proc-arm1020e.S   | 40
-rw-r--r--  arch/arm/mm/proc-arm1022.S    | 39
-rw-r--r--  arch/arm/mm/proc-arm1026.S    | 40
-rw-r--r--  arch/arm/mm/proc-arm920.S     | 40
-rw-r--r--  arch/arm/mm/proc-arm922.S     | 40
-rw-r--r--  arch/arm/mm/proc-arm925.S     | 38
-rw-r--r--  arch/arm/mm/proc-arm926.S     | 38
-rw-r--r--  arch/arm/mm/proc-arm940.S     | 42
-rw-r--r--  arch/arm/mm/proc-arm946.S     | 38
-rw-r--r--  arch/arm/mm/proc-feroceon.S   | 48
-rw-r--r--  arch/arm/mm/proc-mohawk.S     | 38
-rw-r--r--  arch/arm/mm/proc-xsc3.S       | 39
-rw-r--r--  arch/arm/mm/proc-xscale.S     | 40
22 files changed, 544 insertions, 373 deletions
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 71c64e92dead..c3642d5daf38 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
@@ -39,11 +40,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(fa_flush_icache_all)
+SYM_TYPED_FUNC_START(fa_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(fa_flush_icache_all)
+SYM_FUNC_END(fa_flush_icache_all)
/*
* flush_user_cache_all()
@@ -51,14 +52,16 @@ ENDPROC(fa_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(fa_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(fa_flush_user_cache_all)
+ b fa_flush_kern_cache_all
+SYM_FUNC_END(fa_flush_user_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(fa_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(fa_flush_kern_cache_all)
mov ip, #0
mov r2, #VM_EXEC
__flush_whole_cache:
@@ -69,6 +72,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -80,7 +84,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(fa_flush_user_cache_range)
+SYM_TYPED_FUNC_START(fa_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT @ total size >= limit?
@@ -97,6 +101,7 @@ ENTRY(fa_flush_user_cache_range)
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -108,8 +113,9 @@ ENTRY(fa_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(fa_coherent_kern_range)
+ b fa_coherent_user_range
+SYM_FUNC_END(fa_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -121,7 +127,7 @@ ENTRY(fa_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_user_range)
+SYM_TYPED_FUNC_START(fa_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -133,6 +139,7 @@ ENTRY(fa_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -143,7 +150,7 @@ ENTRY(fa_coherent_user_range)
* - addr - kernel address
* - size - size of region
*/
-ENTRY(fa_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
@@ -153,6 +160,7 @@ ENTRY(fa_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -203,7 +211,7 @@ fa_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(fa_dma_flush_range)
+SYM_TYPED_FUNC_START(fa_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -212,6 +220,7 @@ ENTRY(fa_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -219,13 +228,13 @@ ENTRY(fa_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_map_area)
+SYM_TYPED_FUNC_START(fa_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq fa_dma_clean_range
bcs fa_dma_inv_range
b fa_dma_flush_range
-ENDPROC(fa_dma_map_area)
+SYM_FUNC_END(fa_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -233,9 +242,9 @@ ENDPROC(fa_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_unmap_area)
+SYM_TYPED_FUNC_START(fa_dma_unmap_area)
ret lr
-ENDPROC(fa_dma_unmap_area)
+SYM_FUNC_END(fa_dma_unmap_area)
.globl fa_flush_kern_cache_louis
.equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 72d939ef8798..56e94091a55f 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -1,47 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "proc-macros.S"
-ENTRY(nop_flush_icache_all)
+SYM_TYPED_FUNC_START(nop_flush_icache_all)
ret lr
-ENDPROC(nop_flush_icache_all)
+SYM_FUNC_END(nop_flush_icache_all)
- .globl nop_flush_kern_cache_all
- .equ nop_flush_kern_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_cache_all)
.globl nop_flush_kern_cache_louis
.equ nop_flush_kern_cache_louis, nop_flush_icache_all
- .globl nop_flush_user_cache_all
- .equ nop_flush_user_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_all)
- .globl nop_flush_user_cache_range
- .equ nop_flush_user_cache_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_range)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_range)
- .globl nop_coherent_kern_range
- .equ nop_coherent_kern_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(nop_coherent_kern_range)
-ENTRY(nop_coherent_user_range)
+SYM_TYPED_FUNC_START(nop_coherent_user_range)
mov r0, 0
ret lr
-ENDPROC(nop_coherent_user_range)
-
- .globl nop_flush_kern_dcache_area
- .equ nop_flush_kern_dcache_area, nop_flush_icache_all
+SYM_FUNC_END(nop_coherent_user_range)
- .globl nop_dma_flush_range
- .equ nop_dma_flush_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_dcache_area)
- .globl nop_dma_map_area
- .equ nop_dma_map_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_flush_range)
+ ret lr
+SYM_FUNC_END(nop_dma_flush_range)
- .globl nop_dma_unmap_area
- .equ nop_dma_unmap_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_map_area)
+ ret lr
+SYM_FUNC_END(nop_dma_map_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions nop
+
+SYM_TYPED_FUNC_START(nop_dma_unmap_area)
+ ret lr
+SYM_FUNC_END(nop_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 7787057e4990..22d9c9d9e0d7 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -15,9 +16,9 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4_flush_icache_all)
+SYM_TYPED_FUNC_START(v4_flush_icache_all)
ret lr
-ENDPROC(v4_flush_icache_all)
+SYM_FUNC_END(v4_flush_icache_all)
/*
* flush_user_cache_all()
@@ -27,21 +28,24 @@ ENDPROC(v4_flush_icache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v4_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_flush_user_cache_all)
+ b v4_flush_kern_cache_all
+SYM_FUNC_END(v4_flush_user_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -53,14 +57,15 @@ ENTRY(v4_flush_kern_cache_all)
* - end - end address (exclusive, may not be aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -72,8 +77,9 @@ ENTRY(v4_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(v4_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -85,9 +91,10 @@ ENTRY(v4_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_user_range)
+SYM_TYPED_FUNC_START(v4_coherent_user_range)
mov r0, #0
ret lr
+SYM_FUNC_END(v4_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -98,8 +105,9 @@ ENTRY(v4_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4_flush_kern_dcache_area)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area)
+ b v4_dma_flush_range
+SYM_FUNC_END(v4_flush_kern_dcache_area)
/*
* dma_flush_range(start, end)
@@ -109,12 +117,13 @@ ENTRY(v4_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_dma_flush_range)
+SYM_TYPED_FUNC_START(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
ret lr
+SYM_FUNC_END(v4_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -122,10 +131,11 @@ ENTRY(v4_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
bne v4_dma_flush_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -133,10 +143,9 @@ ENTRY(v4_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_map_area)
+SYM_TYPED_FUNC_START(v4_dma_map_area)
ret lr
-ENDPROC(v4_dma_unmap_area)
-ENDPROC(v4_dma_map_area)
+SYM_FUNC_END(v4_dma_map_area)
.globl v4_flush_kern_cache_louis
.equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index ad382cee0fdb..0d97b594e23f 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -53,11 +54,11 @@ flush_base:
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wb_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wb_flush_icache_all)
+SYM_FUNC_END(v4wb_flush_icache_all)
/*
* flush_user_cache_all()
@@ -65,14 +66,16 @@ ENDPROC(v4wb_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wb_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_all)
+ b v4wb_flush_kern_cache_all
+SYM_FUNC_END(v4wb_flush_user_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wb_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
@@ -93,6 +96,7 @@ __flush_whole_cache:
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -104,7 +108,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wb_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
@@ -121,6 +125,7 @@ ENTRY(v4wb_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_user_cache_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -131,9 +136,10 @@ ENTRY(v4wb_flush_user_cache_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
add r1, r0, r1
- /* fall through */
+ b v4wb_coherent_user_range
+SYM_FUNC_END(v4wb_flush_kern_dcache_area)
/*
* coherent_kern_range(start, end)
@@ -145,8 +151,9 @@ ENTRY(v4wb_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
+ b v4wb_coherent_user_range
+SYM_FUNC_END(v4wb_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -158,7 +165,7 @@ ENTRY(v4wb_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -169,7 +176,7 @@ ENTRY(v4wb_coherent_user_range)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
-
+SYM_FUNC_END(v4wb_coherent_user_range)
/*
* dma_inv_range(start, end)
@@ -231,13 +238,13 @@ v4wb_dma_clean_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_map_area)
+SYM_TYPED_FUNC_START(v4wb_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq v4wb_dma_clean_range
bcs v4wb_dma_inv_range
b v4wb_dma_flush_range
-ENDPROC(v4wb_dma_map_area)
+SYM_FUNC_END(v4wb_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -245,9 +252,9 @@ ENDPROC(v4wb_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
ret lr
-ENDPROC(v4wb_dma_unmap_area)
+SYM_FUNC_END(v4wb_dma_unmap_area)
.globl v4wb_flush_kern_cache_louis
.equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 0b290c25a99d..eee6d8f06b4d 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -43,11 +44,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wt_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wt_flush_icache_all)
+SYM_FUNC_END(v4wt_flush_icache_all)
/*
* flush_user_cache_all()
@@ -55,14 +56,16 @@ ENDPROC(v4wt_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wt_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4wt_flush_user_cache_all)
+ b v4wt_flush_kern_cache_all
+SYM_FUNC_END(v4wt_flush_user_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wt_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -70,6 +73,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
ret lr
+SYM_FUNC_END(v4wt_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -81,7 +85,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wt_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
@@ -93,6 +97,7 @@ ENTRY(v4wt_flush_user_cache_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wt_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -104,8 +109,9 @@ ENTRY(v4wt_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_kern_range)
- /* FALLTRHOUGH */
+SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
+ b v4wt_coherent_user_range
+SYM_FUNC_END(v4wt_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -117,7 +123,7 @@ ENTRY(v4wt_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
@@ -125,6 +131,7 @@ ENTRY(v4wt_coherent_user_range)
blo 1b
mov r0, #0
ret lr
+SYM_FUNC_END(v4wt_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -135,11 +142,12 @@ ENTRY(v4wt_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wt_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, r1
- /* fallthrough */
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -167,9 +175,10 @@ v4wt_dma_inv_range:
*
* - start - virtual start address
* - end - virtual end address
- */
- .globl v4wt_dma_flush_range
- .equ v4wt_dma_flush_range, v4wt_dma_inv_range
+*/
+SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -177,11 +186,12 @@ v4wt_dma_inv_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v4wt_dma_inv_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4wt_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -189,10 +199,9 @@ ENTRY(v4wt_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_map_area)
+SYM_TYPED_FUNC_START(v4wt_dma_map_area)
ret lr
-ENDPROC(v4wt_dma_unmap_area)
-ENDPROC(v4wt_dma_map_area)
+SYM_FUNC_END(v4wt_dma_map_area)
.globl v4wt_flush_kern_cache_louis
.equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 44211d8a296f..5c7549a49db5 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -34,7 +35,7 @@
* r0 - set to 0
* r1 - corrupted
*/
-ENTRY(v6_flush_icache_all)
+SYM_TYPED_FUNC_START(v6_flush_icache_all)
mov r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
mrs r1, cpsr
@@ -51,7 +52,7 @@ ENTRY(v6_flush_icache_all)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
#endif
ret lr
-ENDPROC(v6_flush_icache_all)
+SYM_FUNC_END(v6_flush_icache_all)
/*
* v6_flush_cache_all()
@@ -60,7 +61,7 @@ ENDPROC(v6_flush_icache_all)
*
* It is assumed that:
*/
-ENTRY(v6_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
@@ -73,6 +74,7 @@ ENTRY(v6_flush_kern_cache_all)
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
ret lr
+SYM_FUNC_END(v6_flush_kern_cache_all)
/*
* v6_flush_cache_all()
@@ -81,8 +83,9 @@ ENTRY(v6_flush_kern_cache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v6_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v6_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v6_flush_user_cache_all)
/*
* v6_flush_cache_range(start, end, flags)
@@ -96,8 +99,9 @@ ENTRY(v6_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v6_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v6_flush_user_cache_range)
ret lr
+SYM_FUNC_END(v6_flush_user_cache_range)
/*
* v6_coherent_kern_range(start,end)
@@ -112,8 +116,9 @@ ENTRY(v6_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v6_coherent_kern_range)
+ b v6_coherent_user_range
+SYM_FUNC_END(v6_coherent_kern_range)
/*
* v6_coherent_user_range(start,end)
@@ -128,7 +133,7 @@ ENTRY(v6_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_user_range)
+SYM_TYPED_FUNC_START(v6_coherent_user_range)
UNWIND(.fnstart )
#ifdef HARVARD_CACHE
bic r0, r0, #CACHE_LINE_SIZE - 1
@@ -159,8 +164,7 @@ ENTRY(v6_coherent_user_range)
mov r0, #-EFAULT
ret lr
UNWIND(.fnend )
-ENDPROC(v6_coherent_user_range)
-ENDPROC(v6_coherent_kern_range)
+SYM_FUNC_END(v6_coherent_user_range)
/*
* v6_flush_kern_dcache_area(void *addr, size_t size)
@@ -171,7 +175,7 @@ ENDPROC(v6_coherent_kern_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v6_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area)
add r1, r0, r1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
@@ -188,7 +192,7 @@ ENTRY(v6_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c10, 4
#endif
ret lr
-
+SYM_FUNC_END(v6_flush_kern_dcache_area)
/*
* v6_dma_inv_range(start,end)
@@ -253,7 +257,7 @@ v6_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_flush_range)
+SYM_TYPED_FUNC_START(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -267,6 +271,7 @@ ENTRY(v6_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v6_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -274,12 +279,12 @@ ENTRY(v6_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_map_area)
+SYM_TYPED_FUNC_START(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
b v6_dma_clean_range
-ENDPROC(v6_dma_map_area)
+SYM_FUNC_END(v6_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -287,12 +292,12 @@ ENDPROC(v6_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_unmap_area)
+SYM_TYPED_FUNC_START(v6_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v6_dma_inv_range
ret lr
-ENDPROC(v6_dma_unmap_area)
+SYM_FUNC_END(v6_dma_unmap_area)
.globl v6_flush_kern_cache_louis
.equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 127afe2096ba..5908dd54de47 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -80,12 +81,12 @@ ENDPROC(v7_invalidate_l1)
* Registers:
* r0 - set to 0
*/
-ENTRY(v7_flush_icache_all)
+SYM_TYPED_FUNC_START(v7_flush_icache_all)
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ret lr
-ENDPROC(v7_flush_icache_all)
+SYM_FUNC_END(v7_flush_icache_all)
/*
* v7_flush_dcache_louis()
@@ -193,7 +194,7 @@ ENDPROC(v7_flush_dcache_all)
* unification in a single instruction.
*
*/
-ENTRY(v7_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_all
mov r0, #0
@@ -201,7 +202,7 @@ ENTRY(v7_flush_kern_cache_all)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_all)
+SYM_FUNC_END(v7_flush_kern_cache_all)
/*
* v7_flush_kern_cache_louis(void)
@@ -209,7 +210,7 @@ ENDPROC(v7_flush_kern_cache_all)
* Flush the data cache up to Level of Unification Inner Shareable.
* Invalidate the I-cache to the point of unification.
*/
-ENTRY(v7_flush_kern_cache_louis)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_louis
mov r0, #0
@@ -217,7 +218,7 @@ ENTRY(v7_flush_kern_cache_louis)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_louis)
+SYM_FUNC_END(v7_flush_kern_cache_louis)
/*
* v7_flush_cache_all()
@@ -226,8 +227,9 @@ ENDPROC(v7_flush_kern_cache_louis)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v7_flush_user_cache_all)
/*
* v7_flush_cache_range(start, end, flags)
@@ -241,10 +243,9 @@ ENTRY(v7_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v7_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
ret lr
-ENDPROC(v7_flush_user_cache_all)
-ENDPROC(v7_flush_user_cache_range)
+SYM_FUNC_END(v7_flush_user_cache_range)
/*
* v7_coherent_kern_range(start,end)
@@ -259,8 +260,9 @@ ENDPROC(v7_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v7_coherent_kern_range)
+ b v7_coherent_user_range
+SYM_FUNC_END(v7_coherent_kern_range)
/*
* v7_coherent_user_range(start,end)