Diffstat (limited to 'arch/avr32/include/asm')
60 files changed, 0 insertions, 4829 deletions
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
deleted file mode 100644
index 3d7ef2c17a7c..000000000000
--- a/arch/avr32/include/asm/Kbuild
+++ /dev/null
@@ -1,23 +0,0 @@
-
-generic-y += clkdev.h
-generic-y += delay.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += futex.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += param.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/avr32/include/asm/addrspace.h b/arch/avr32/include/asm/addrspace.h
deleted file mode 100644
index 5a47a7979648..000000000000
--- a/arch/avr32/include/asm/addrspace.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Definitions for the address spaces of the AVR32 CPUs. Heavily based on
- * include/asm-sh/addrspace.h
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_ADDRSPACE_H
-#define __ASM_AVR32_ADDRSPACE_H
-
-#ifdef CONFIG_MMU
-
-/* Memory segments when segmentation is enabled */
-#define P0SEG 0x00000000
-#define P1SEG 0x80000000
-#define P2SEG 0xa0000000
-#define P3SEG 0xc0000000
-#define P4SEG 0xe0000000
-
-/* Returns the privileged segment base of a given address */
-#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
-
-/* Returns the physical address of a PnSEG (n=1,2) address */
-#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
-
-/*
- * Map an address to a certain privileged segment
- */
-#define P1SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
- | P1SEG))
-#define P2SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
- | P2SEG))
-#define P3SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
- | P3SEG))
-#define P4SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
- | P4SEG))
-
-#endif /* CONFIG_MMU */
-
-#endif /* __ASM_AVR32_ADDRSPACE_H */
diff --git a/arch/avr32/include/asm/asm-offsets.h b/arch/avr32/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/avr32/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
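
The addrspace.h macros removed above implement AVR32's fixed virtual segments: masking off the top three address bits recovers the 29-bit physical address, and OR-ing in a segment base remaps it into a privileged segment. A minimal user-space C sketch of the same arithmetic (the sample physical address is made up):

    #include <stdio.h>

    #define P1SEG 0x80000000

    /* Strip the segment bits, keeping the 29-bit physical offset */
    #define PHYSADDR(a)  (((unsigned long)(a)) & 0x1fffffff)
    /* Re-map a physical offset into the cached (P1) segment */
    #define P1SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))

    int main(void)
    {
        unsigned long phys = 0x10000000; /* hypothetical physical address */

        /* 0x10000000 -> 0x90000000: same memory, seen through P1 */
        printf("P1 mapping:    %#lx\n", (unsigned long)P1SEGADDR(phys));
        /* and back again: 0x90000000 -> 0x10000000 */
        printf("physical addr: %#lx\n", PHYSADDR(P1SEGADDR(phys)));
        return 0;
    }
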
diff --git a/arch/avr32/include/asm/asm.h b/arch/avr32/include/asm/asm.h
deleted file mode 100644
index a2c64f404b98..000000000000
--- a/arch/avr32/include/asm/asm.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_ASM_H__
-#define __ASM_AVR32_ASM_H__
-
-#include <asm/sysreg.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-#define mask_interrupts ssrf SYSREG_GM_OFFSET
-#define mask_exceptions ssrf SYSREG_EM_OFFSET
-#define unmask_interrupts csrf SYSREG_GM_OFFSET
-#define unmask_exceptions csrf SYSREG_EM_OFFSET
-
-#ifdef CONFIG_FRAME_POINTER
- .macro save_fp
- st.w --sp, r7
- .endm
- .macro restore_fp
- ld.w r7, sp++
- .endm
- .macro zero_fp
- mov r7, 0
- .endm
-#else
- .macro save_fp
- .endm
- .macro restore_fp
- .endm
- .macro zero_fp
- .endm
-#endif
- .macro get_thread_info reg
- mov \reg, sp
- andl \reg, ~(THREAD_SIZE - 1) & 0xffff
- .endm
-
- /* Save and restore registers */
- .macro save_min sr, tmp=lr
- pushm lr
- mfsr \tmp, \sr
- zero_fp
- st.w --sp, \tmp
- .endm
-
- .macro restore_min sr, tmp=lr
- ld.w \tmp, sp++
- mtsr \sr, \tmp
- popm lr
- .endm
-
- .macro save_half sr, tmp=lr
- save_fp
- pushm r8-r9,r10,r11,r12,lr
- zero_fp
- mfsr \tmp, \sr
- st.w --sp, \tmp
- .endm
-
- .macro restore_half sr, tmp=lr
- ld.w \tmp, sp++
- mtsr \sr, \tmp
- popm r8-r9,r10,r11,r12,lr
- restore_fp
- .endm
-
- .macro save_full_user sr, tmp=lr
- stmts --sp, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
- st.w --sp, lr
- zero_fp
- mfsr \tmp, \sr
- st.w --sp, \tmp
- .endm
-
- .macro restore_full_user sr, tmp=lr
- ld.w \tmp, sp++
- mtsr \sr, \tmp
- ld.w lr, sp++
- ldmts sp++, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
- .endm
-
- /* uaccess macros */
- .macro branch_if_kernel scratch, label
- get_thread_info \scratch
- ld.w \scratch, \scratch[TI_flags]
- bld \scratch, TIF_USERSPACE
- brcc \label
- .endm
-
- .macro ret_if_privileged scratch, addr, size, ret
- sub \scratch, \size, 1
- add \scratch, \addr
- retcs \ret
- retmi \ret
- .endm
-
-#endif /* __ASM_AVR32_ASM_H__ */
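
The get_thread_info macro in asm.h relies on the usual kernel convention that struct thread_info sits at the bottom of a power-of-two-sized kernel stack, so masking the stack pointer finds it. A hedged C sketch of that masking trick (the THREAD_SIZE value and the sample stack pointer here are assumptions, not taken from the AVR32 port):

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192 /* assumed: a power-of-two kernel stack size */

    /* Round any in-stack address down to the base of its stack, where
     * struct thread_info lives -- the C analogue of get_thread_info. */
    static uintptr_t thread_info_base(uintptr_t sp)
    {
        return sp & ~((uintptr_t)THREAD_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t sp = 0x91ffe5c4; /* hypothetical kernel stack pointer */

        printf("thread_info at %#lx\n", (unsigned long)thread_info_base(sp));
        return 0;
    }
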
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
deleted file mode 100644
index 3d5ce38a6f0b..000000000000
--- a/arch/avr32/include/asm/atomic.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc.
- *
- * But use these as seldom as possible since they are slower than
- * regular operations.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_ATOMIC_H
-#define __ASM_AVR32_ATOMIC_H
-
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
-static inline int __atomic_##op##_return(int i, atomic_t *v) \
-{ \
- int result; \
- \
- asm volatile( \
- "/* atomic_" #op "_return */\n" \
- "1: ssrf 5\n" \
- " ld.w %0, %2\n" \
- " " #asm_op " %0, %3\n" \
- " stcond %1, %0\n" \
- " brne 1b" \
- : "=&r" (result), "=o" (v->counter) \
- : "m" (v->counter), #asm_con (i) \
- : "cc"); \
- \
- return result; \
-}
-
-#define ATOMIC_FETCH_OP(op, asm_op, asm_con) \
-static inline int __atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- int result, val; \
- \
- asm volatile( \
- "/* atomic_fetch_" #op " */\n" \
- "1: ssrf 5\n" \
- " ld.w %0, %3\n" \
- " mov %1, %0\n" \
- " " #asm_op " %1, %4\n" \
- " stcond %2, %1\n" \
- " brne 1b" \
- : "=&r" (result), "=&r" (val), "=o" (v->counter) \
- : "m" (v->counter), #asm_con (i) \
- : "cc"); \
- \
- return result; \
-}
-
-ATOMIC_OP_RETURN(sub, sub, rKs21)
-ATOMIC_OP_RETURN(add, add, r)
-ATOMIC_FETCH_OP (sub, sub, rKs21)
-ATOMIC_FETCH_OP (add, add, r)
-
-#define ATOMIC_OPS(op, asm_op) \
-ATOMIC_OP_RETURN(op, asm_op, r) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- (void)__atomic_##op##_return(i, v); \
-} \
-ATOMIC_FETCH_OP(op, asm_op, r) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- return __atomic_fetch_##op(i, v); \
-}
-
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(or, or)
-ATOMIC_OPS(xor, eor)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-
-/*
- * Probably found the reason why we want to use sub with the signed 21-bit
- * limit, it uses one less register than the add instruction that can add up to
- * 32-bit values.
- *
- * Both instructions are 32-bit, to use a 16-bit instruction the immediate is
- * very small; 4 bit.
- *
- * sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate.
- * add 32-bit, type II, adds two register values together.
- */
-#define IS_21BIT_CONST(i) \
- (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
-
-/*
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v. Returns the resulting value.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- if (IS_21BIT_CONST(i))
- return __atomic_sub_return(-i, v);
-
- return __atomic_add_return(i, v);
-}
-
-static inline int atomic_fetch_add(int i, atomic_t *v)
-{
- if (IS_21BIT_CONST(i))
- return __atomic_fetch_sub(-i, v);
-
- return __atomic_fetch_add(i, v);
-}
-
-/*
- * atomic_sub_return - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v. Returns the resulting value.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- if (IS_21BIT_CONST(i))
- return __atomic_sub_return(i, v);
-
- return __atomic_add_return(-i, v);
-}
-
-static inline int atomic_fetch_sub(int i, atomic_t *v)
-{
- if (IS_21BIT_CONST(i))
- return __atomic_fetch_sub(i, v);
-
- return __atomic_fetch_add(-i, v);
-}
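
The comment block above explains the dispatch in atomic_add_return and friends: AVR32's sub takes a signed 21-bit immediate while add needs both operands in registers, so a compile-time-constant add is rewritten as a subtraction of the negated constant. A stand-alone sketch of the IS_21BIT_CONST test (it uses GCC's __builtin_constant_p; the printed strings just label which path the kernel code would take):

    #include <stdio.h>

    /* Mirrors the header's IS_21BIT_CONST: true when i is a compile-time
     * constant that fits sub's signed 21-bit immediate field. */
    #define IS_21BIT_CONST(i) \
        (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))

    int main(void)
    {
        /* 1024 fits in 21 bits: the kernel emits "sub %0, -1024" instead
         * of loading 1024 into a scratch register and using add. */
        printf("1024     -> %s\n", IS_21BIT_CONST(1024) ? "sub -i" : "add i");
        /* 0x200000 (2097152) exceeds the range: fall back to add. */
        printf("0x200000 -> %s\n", IS_21BIT_CONST(0x200000) ? "sub -i" : "add i");
        return 0;
    }
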
-/*
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
-*/
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int tmp, old = atomic_read(v);
-
- if (IS_21BIT_CONST(a)) {
- asm volatile(
- "/* __atomic_sub_unless */\n"
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " cp.w %0, %4\n"
- " breq 1f\n"
- " sub %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b\n"
- "1:"
- : "=&r"(tmp), "=o"(v->counter)
- : "m"(v->counter), "rKs21"(-a), "rKs21"(u)
- : "cc", "memory");
- } else {
- asm volatile(
- "/* __atomic_add_unless */\n"
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " cp.w %0, %4\n"
- " breq 1f\n"
- " add %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b\n"
- "1:"
- : "=&r"(tmp), "=o"(v->counter)
- : "m"(v->counter), "r"(a), "ir"(u)
- : "cc", "memory");
- }
-
- return old;
-}
-
-#undef IS_21BIT_CONST
-
-/*
- * atomic_sub_if_positive - conditionally subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
-static inline int atomic_sub_if_positive(int i, atomic_t *v)
-{
- int result;
-
- asm volatile(
- "/* atomic_sub_if_positive */\n"
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " sub %0, %3\n"
- " brlt 1f\n"
- " stcond %1, %0\n"
- " brne 1b\n"
- "1:"
- : "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "ir"(i)
- : "cc", "memory");
-
- return result;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-
-#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
-#define atomic_add(i, v) (void)atomic_add_return(i, v)
-#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc(v) atomic_add(1, (v))
-
-#define atomic_dec_return(v) atomic_sub_return(1, v)
-#define atomic_inc_return(v) atomic_add_return(1, v)
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-
-#endif /* __ASM_AVR32_ATOMIC_H */
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
deleted file mode 100644
index 715100790fd0..000000000000
--- a/arch/avr32/include/asm/barrier.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_BARRIER_H
-#define __ASM_AVR32_BARRIER_H
-
-/*
- * Weirdest thing ever.. no full barrier, but it has a write barrier!
- */
-#define wmb() asm volatile("sync 0" : : : "memory")
-
-#ifdef CONFIG_SMP
-# error "The AVR32 port does not support SMP"
-#endif
-
-#include <asm-generic/barrier.h>
-
-#endif /* __ASM_AVR32_BARRIER_H */
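
barrier.h only defined a write barrier (the "sync 0" instruction); everything else fell through to asm-generic. For context, a sketch of the store-store publication pattern such a wmb() exists for, written here with portable C11 atomics rather than the AVR32 instruction: the ordered stores guarantee that a consumer that observes the flag also observes the data.

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;        /* ordinary data */
    static atomic_int ready;   /* publication flag */

    static void producer(void)
    {
        payload = 42;
        /* On avr32, wmb() ("sync 0") between the two stores provided
         * this ordering; here C11 release semantics stand in for it. */
        atomic_store_explicit(&ready, 1, memory_order_release);
    }

    static void consumer(void)
    {
        if (atomic_load_explicit(&ready, memory_order_acquire))
            printf("payload = %d\n", payload); /* guaranteed to see 42 */
    }

    int main(void)
    {
        producer();
        consumer();
        return 0;
    }
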
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
deleted file mode 100644
index 910d5374ce59..000000000000
--- a/arch/avr32/include/asm/bitops.h
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_BITOPS_H
-#define __ASM_AVR32_BITOPS_H
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-/*
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long tmp;
-
- if (__builtin_constant_p(nr)) {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " sbr %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p)
- : "m"(*p), "i"(nr)
- : "cc");
- } else {
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " or %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p)
- : "m"(*p), "r"(mask)
- : "cc");
- }
-}
-
-/*
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long tmp;
-
- if (__builtin_constant_p(nr)) {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " cbr %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p)
- : "m"(*p), "i"(nr)
- : "cc");
- } else {
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " andn %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p)
- : "m"(*p), "r"(mask)
- : "cc");
- }
-}
-
-/*
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- unsigned long tmp;
-
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " eor %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p)
- : "m"(*p), "r"(mask)
- : "cc");
-}
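
All of the bitops above share one addressing convention: bit nr lives in word nr / BITS_PER_LONG, at bit position nr % BITS_PER_LONG within that word, so a bitmap can span many words. A non-atomic user-space sketch of just that indexing (the kernel versions wrap the update in the ssrf/stcond retry loop for atomicity):

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

    /* Non-atomic analogues of set_bit()/clear_bit(): pick the word,
     * then OR in or AND out the bit's mask. */
    static void set_bit_simple(int nr, unsigned long *addr)
    {
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static void clear_bit_simple(int nr, unsigned long *addr)
    {
        addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 }; /* 64 bits on a 32-bit target */

        set_bit_simple(33, map);  /* lands in map[1] on 32-bit longs */
        printf("%#lx %#lx\n", map[0], map[1]);
        clear_bit_simple(33, map);
        printf("%#lx %#lx\n", map[0], map[1]);
        return 0;
    }
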
-/*
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- unsigned long tmp, old;
-
- if (__builtin_constant_p(nr)) {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %3\n"
- " mov %2, %0\n"
- " sbr %0, %4\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p), "=&r"(old)
- : "m"(*p), "i"(nr)
- : "memory", "cc");
- } else {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %2, %3\n"
- " or %0, %2, %4\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p), "=&r"(old)
- : "m"(*p), "r"(mask)
- : "memory", "cc");
- }
-
- return (old & mask) != 0;
-}
-
-/*
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- unsigned long tmp, old;
-
- if (__builtin_constant_p(nr)) {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %3\n"
- " mov %2, %0\n"
- " cbr %0, %4\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p), "=&r"(old)
- : "m"(*p), "i"(nr)
- : "memory", "cc");
- } else {
- asm volatile(
- "1: ssrf 5\n"
- " ld.w %0, %3\n"
- " mov %2, %0\n"
- " andn %0, %4\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(tmp), "=o"(*p), "=&r"(old)
- : "m"(*p), "r"(mask)
- : "memory", "cc");
- }
-
- return (old & mask) != 0;
-}
-
-/*
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile void * addr)
-{
- unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
- unsigned long mask = 1UL << (nr % BITS_PER_LONG);
- unsigned long tmp, old;
-
- asm volatile(
- "1: ssrf 5\n"