/*
* User address space access functions.
* The non-inlined parts of asm-metag/uaccess.h are here.
*
* Copyright (C) 2006, Imagination Technologies.
* Copyright (C) 2000, Axis Communications AB.
*
* Written by Hans-Peter Nilsson.
* Pieces used from memcpy, originally by Kenny Ranerup long time ago.
* Modified for Meta by Will Newton.
*/
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h> /* def of L1_CACHE_BYTES */
/* Enable the RAPF (read-ahead prefetch) optimized copy loops below.
 * NOTE(review): presumably RAPF refers to the Meta hardware read-ahead
 * prefetcher — confirm against the Meta architecture manual. */
#define USE_RAPF
/* Minimum buffer size for which the RAPF loop is used/worthwhile
 * (three L1 cache lines). */
#define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES)
/* The "double write" in this code is because the Meta will not fault
* immediately unless the memory pipe is forced to by e.g. a data stall or
* another memory op. The second write should be discarded by the write
* combiner so should have virtually no cost.
*/
/*
 * Core template shared by all the fixed-size copy macros below.
 *
 * COPY   - asm text that performs the copy; instructions that may fault
 *          carry numeric local labels (2:, 4:, ...).
 * FIXUP  - recovery code emitted into the .fixup section; each handler
 *          adds the size of the chunk that faulted to ret (%2) and then
 *          jumps back to local label 1 (just past the copy code).
 * TENTRY - __ex_table entries (".long <fault insn>,<fixup>") pairing
 *          each faulting instruction with its FIXUP handler.
 *
 * Operands: %0 = to and %1 = from (both advanced by post-increment
 * addressing), %2 = ret (accumulated fault count).  D1Ar1 is used as
 * the data/scratch register (also for the fixup return jump), hence
 * the clobber; "memory" because user memory is written.
 */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
asm volatile ( \
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
TENTRY \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret) \
: "0" (to), "1" (from), "2" (ret) \
: "D1Ar1", "memory")
/*
 * Copy a single byte to user space.  The first SETB (no increment)
 * forces any fault to be raised here — see the "double write" comment
 * at the top of this file — while only the second, post-increment SETB
 * (label 2) is covered by the exception table.  On fault, 1 is added
 * to ret.
 */
#define __asm_copy_to_user_1(to, from, ret)	\
__asm_copy_user_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"2: SETB [%0++],D1Ar1\n", \
"3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
/*
 * Copy a 16-bit word (GETW/SETW, double-write scheme, labels 2/3),
 * then splice the caller's COPY/FIXUP/TENTRY text for any tail bytes.
 * Each *_Nx_cont macro in this family handles the first N bytes and
 * chains extra code with distinct local label numbers.
 */
#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret,		\
" GETW D1Ar1,[%1++]\n" \
" SETW [%0],D1Ar1\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
/* Copy exactly 2 bytes to user space. */
#define __asm_copy_to_user_2(to, from, ret) \
__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
/* Copy exactly 3 bytes: 2-byte word plus a trailing byte (labels 4/5). */
#define __asm_copy_to_user_3(to, from, ret) \
__asm_copy_to_user_2x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
/*
 * Copy a 32-bit word (GETD/SETD, double-write scheme, labels 2/3),
 * then splice the caller's COPY/FIXUP/TENTRY text for the remainder.
 */
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret,		\
" GETD D1Ar1,[%1++]\n" \
" SETD [%0],D1Ar1\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
"3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
/* Copy exactly 4 bytes to user space. */
#define __asm_copy_to_user_4(to, from, ret) \
__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
/* Copy exactly 5 bytes: 4-byte word plus a trailing byte (labels 4/5). */
#define __asm_copy_to_user_5(to, from, ret) \
__asm_copy_to_user_4x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"4: SETB [%0++],D1Ar1\n", \
"5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
/*
 * Copy 6 bytes (4-byte word via _4x_cont, then a 16-bit word at
 * labels 4/5), splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret,	\
" GETW D1Ar1,[%1++]\n" \
" SETW [%0],D1Ar1\n" \
"4: SETW [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#2\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
/* Copy exactly 6 bytes to user space. */
#define __asm_copy_to_user_6(to, from, ret) \
__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
/* Copy exactly 7 bytes: 6 bytes plus a trailing byte (labels 6/7). */
#define __asm_copy_to_user_7(to, from, ret) \
__asm_copy_to_user_6x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n", \
" .long 6b,7b\n")
/*
 * Copy 8 bytes as two 32-bit words (first via _4x_cont, second at
 * labels 4/5), splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_4x_cont(to, from, ret,	\
" GETD D1Ar1,[%1++]\n" \
" SETD [%0],D1Ar1\n" \
"4: SETD [%0++],D1Ar1\n" COPY, \
"5: ADD %2,%2,#4\n" FIXUP, \
" .long 4b,5b\n" TENTRY)
/* Copy exactly 8 bytes to user space. */
#define __asm_copy_to_user_8(to, from, ret) \
__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
/* Copy exactly 9 bytes: 8 bytes plus a trailing byte (labels 6/7). */
#define __asm_copy_to_user_9(to, from, ret) \
__asm_copy_to_user_8x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"6: SETB [%0++],D1Ar1\n", \
"7: ADD %2,%2,#1\n", \
" .long 6b,7b\n")
/*
 * Copy 10 bytes (8 via _8x_cont, then a 16-bit word at labels 6/7),
 * splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret,	\
" GETW D1Ar1,[%1++]\n" \
" SETW [%0],D1Ar1\n" \
"6: SETW [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#2\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
/* Copy exactly 10 bytes to user space. */
#define __asm_copy_to_user_10(to, from, ret) \
__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
/* Copy exactly 11 bytes: 10 bytes plus a trailing byte (labels 8/9). */
#define __asm_copy_to_user_11(to, from, ret) \
__asm_copy_to_user_10x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n", \
" .long 8b,9b\n")
/*
 * Copy 12 bytes (8 via _8x_cont, then a 32-bit word at labels 6/7),
 * splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_8x_cont(to, from, ret,	\
" GETD D1Ar1,[%1++]\n" \
" SETD [%0],D1Ar1\n" \
"6: SETD [%0++],D1Ar1\n" COPY, \
"7: ADD %2,%2,#4\n" FIXUP, \
" .long 6b,7b\n" TENTRY)
/* Copy exactly 12 bytes to user space. */
#define __asm_copy_to_user_12(to, from, ret) \
__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
/* Copy exactly 13 bytes: 12 bytes plus a trailing byte (labels 8/9). */
#define __asm_copy_to_user_13(to, from, ret) \
__asm_copy_to_user_12x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"8: SETB [%0++],D1Ar1\n", \
"9: ADD %2,%2,#1\n", \
" .long 8b,9b\n")
/*
 * Copy 14 bytes (12 via _12x_cont, then a 16-bit word at labels 8/9),
 * splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret,	\
" GETW D1Ar1,[%1++]\n" \
" SETW [%0],D1Ar1\n" \
"8: SETW [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#2\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
/* Copy exactly 14 bytes to user space. */
#define __asm_copy_to_user_14(to, from, ret) \
__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
/* Copy exactly 15 bytes: 14 bytes plus a trailing byte (labels 10/11). */
#define __asm_copy_to_user_15(to, from, ret) \
__asm_copy_to_user_14x_cont(to, from, ret,	\
" GETB D1Ar1,[%1++]\n" \
" SETB [%0],D1Ar1\n" \
"10: SETB [%0++],D1Ar1\n", \
"11: ADD %2,%2,#1\n", \
" .long 10b,11b\n")
/*
 * Copy 16 bytes (12 via _12x_cont, then a 32-bit word at labels 8/9),
 * splicing further tail code via COPY/FIXUP/TENTRY.
 */
#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_to_user_12x_cont(to, from, ret,	\
" GETD D1Ar1,[%1++]\n" \
" SETD [%0],D1Ar1\n" \
"8: SETD [%0++],D1Ar1\n" COPY, \
"9: ADD %2,%2,#4\n" FIXUP, \
" .long 8b,9b\n" TENTRY)
/* Copy exactly 16 bytes to user space. */
#define __asm_copy_to_user_16(to, from, ret) \
__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
/*
 * Copy 8 bytes in a single 64-bit GETL/SETL register-pair access
 * (presumably requires 64-bit aligned pointers — the callers gate this;
 * confirm against the RAPF/alignment logic below).  Same double-write
 * fault-forcing scheme as above: only the second, post-increment SETL
 * (label 2) is in the exception table; on fault 8 is added to ret and
 * control resumes at label 1.  Clobbers the D0Ar2/D1Ar1 register pair
 * (D0Ar2 is also the scratch for the fixup return jump).
 */
#define __asm_copy_to_user_8x64(to, from, ret) \
asm volatile ( \
"	GETL D0Ar2,D1Ar1,[%1++]\n" \
"	SETL [%0],D0Ar2,D1Ar1\n" \
"2:	SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
"	.section .fixup,\"ax\"\n" \
"3:	ADD  %2,%2,#8\n" \
"	MOVT D0Ar2,#HI(1b)\n" \
"	JUMP D0Ar2,#LO(1b)\n" \
"	.previous\n" \
"	.section __ex_table,\"a\"\n" \
"	.long 2b,3b\n" \
"	.previous\n" \
: "=r" (to), "=r" (from), "=r" (ret) \
: "0" (to), "1" (from), "2" (ret) \
: "D1Ar1", "D0Ar2", "memory")
/*
* optimized copying loop using RAPF when 64 bit aligned
*
* n will be automatically decremented inside the loop
* ret will be left intact. if error occurs we will rewind