 -rw-r--r--  arch/x86/include/asm/percpu.h     |   3
 -rw-r--r--  drivers/target/target_core_tpg.c  |   4
 -rw-r--r--  fs/aio.c                          |   6
 -rw-r--r--  include/asm-generic/percpu.h      | 410
 -rw-r--r--  include/linux/percpu-defs.h       | 380
 -rw-r--r--  include/linux/percpu-refcount.h   |  64
 -rw-r--r--  include/linux/percpu.h            | 673
 -rw-r--r--  kernel/cgroup.c                   |   8
 -rw-r--r--  kernel/workqueue.c                |   6
 -rw-r--r--  lib/percpu-refcount.c             |  86
 -rw-r--r--  mm/percpu.c                       |   3
 11 files changed, 832 insertions(+), 811 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 851bcdc5db04..fd472181a1d0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -52,10 +52,9 @@
  * Compared to the generic __my_cpu_offset version, the following
  * saves one instruction and avoids clobbering a temp register.
  */
-#define raw_cpu_ptr(ptr)				\
+#define arch_raw_cpu_ptr(ptr)				\
 ({							\
 	unsigned long tcp_ptr__;			\
-	__verify_pcpu_ptr(ptr);				\
 	asm volatile("add " __percpu_arg(1) ", %0"	\
 		     : "=r" (tcp_ptr__)			\
 		     : "m" (this_cpu_off), "0" (ptr));	\
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c036595b17cf..fddfae61222f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -825,7 +825,7 @@ int core_tpg_add_lun(
 
 	ret = core_dev_export(dev, tpg, lun);
 	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
+		percpu_ref_exit(&lun->lun_ref);
 		return ret;
 	}
 
@@ -880,5 +880,7 @@ int core_tpg_post_dellun(
 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 	spin_unlock(&tpg->tpg_lun_lock);
 
+	percpu_ref_exit(&lun->lun_ref);
+
 	return 0;
 }
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -506,6 +506,8 @@ static void free_ioctx(struct work_struct *work)
 
 	aio_free_ring(ctx);
 	free_percpu(ctx->cpu);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -715,8 +717,8 @@ err_ctx:
 err:
 	mutex_unlock(&ctx->ring_lock);
 	free_percpu(ctx->cpu);
-	free_percpu(ctx->reqs.pcpu_count);
-	free_percpu(ctx->users.pcpu_count);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
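The two conversions above show the new percpu_ref lifecycle: error paths that used to call the now-removed percpu_ref_cancel_init(), or to free ref->pcpu_count by hand, switch to percpu_ref_exit(), and the same call is added to the final-free paths. A rough sketch of the resulting pattern, using illustrative names (struct obj, obj_setup() and obj_release() are not part of this patch):

	/* sketch only -- obj, obj_setup() and obj_release() are made up */
	struct obj {
		struct percpu_ref ref;
		/* ... */
	};

	static int obj_create(struct obj *obj)
	{
		int ret = percpu_ref_init(&obj->ref, obj_release);

		if (ret)
			return ret;

		ret = obj_setup(obj);
		if (ret) {
			/* undo percpu_ref_init() on the error path */
			percpu_ref_exit(&obj->ref);
			return ret;
		}
		return 0;
	}

	/* runs once the killed ref has been released */
	static void obj_free(struct obj *obj)
	{
		percpu_ref_exit(&obj->ref);	/* frees the percpu counter */
		kfree(obj);
	}

Unlike percpu_ref_cancel_init(), which was only valid on a ref that had never been used, percpu_ref_exit() serves both the init-failure path and the post-release teardown path.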
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0703aa75b5e8..4d9f233c4ba8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -36,93 +36,385 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #endif
 
 /*
- * Add a offset to a pointer but keep the pointer as is.
- *
- * Only S390 provides its own means of moving the pointer.
+ * Arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
  */
-#ifndef SHIFT_PERCPU_PTR
-/* Weird cast keeps both GCC and sparse happy. */
-#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
-	__verify_pcpu_ptr((__p));					\
-	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
-})
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr)	SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-
-#ifndef raw_cpu_ptr
-#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
 #endif
-#ifdef CONFIG_DEBUG_PREEMPT
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+
+#endif	/* SMP */
+
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
-#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#define PER_CPU_BASE_SECTION ".data"
+#endif
 #endif
 
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-extern void setup_per_cpu_areas(void);
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
 #endif
 
-#else /* ! SMP */
+#define raw_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+} while (0)
 
-#define VERIFY_PERCPU_PTR(__p) ({			\
-	__verify_pcpu_ptr((__p));			\
-	(typeof(*(__p)) __kernel __force *)(__p);	\
+#define raw_cpu_generic_add_return(pcp, val)				\
+({									\
+	raw_cpu_add(pcp, val);						\
+	raw_cpu_read(pcp);						\
 })
 
-#define per_cpu(var, cpu)	(*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
-#define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
-#define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
-#define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
-#define raw_cpu_ptr(ptr)	this_cpu_ptr(ptr)
+#define raw_cpu_generic_xchg(pcp, nval)					\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	__ret;								\
+})
 
-#endif	/* SMP */
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	__ret;								\
+})
 
-#ifndef PER_CPU_BASE_SECTION
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data..percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({									\
+	int __ret = 0;							\
+	if (raw_cpu_read(pcp1) == (oval1) &&				\
+	    raw_cpu_read(pcp2) == (oval2)) {				\
+		raw_cpu_write(pcp1, nval1);				\
+		raw_cpu_write(pcp2, nval2);				\
+		__ret = 1;						\
+	}								\
+	(__ret);							\
+})
+
+#define this_cpu_generic_read(pcp)					\
+({									\
+	typeof(pcp) __ret;						\
+	preempt_disable();						\
+	__ret = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	__ret;								\
+})
+
+#define this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	*raw_cpu_ptr(&(pcp)) op val;					\
+	raw_local_irq_restore(__flags);					\
+} while (0)
+
+#define this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	raw_cpu_add(pcp, val);						\
+	__ret = raw_cpu_read(pcp);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_xchg(pcp, nval)				\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	raw_cpu_write(pcp, nval);					\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) __ret;						\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_read(pcp);					\
+	if (__ret == (oval))						\
+		raw_cpu_write(pcp, nval);				\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({									\
+	int __ret;							\
+	unsigned long __flags;						\
+	raw_local_irq_save(__flags);					\
+	__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
+			oval1, oval2, nval1, nval2);			\
+	raw_local_irq_restore(__flags);					\
+	__ret;								\
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
 #endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
 #endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
 #endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
 #endif
 
-#ifdef CONFIG_SMP
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, =)
+#endif
 
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, +=)
 #endif
 
-#define PER_CPU_FIRST_SECTION "..first"
-#else
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op(pcp, val, |=)
+#endif
 
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
+#endif
 
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
 #endif
 
-#ifndef PER_CPU_ATTRIBUTES
-#define PER_CPU_ATTRIBUTES
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+	raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
 
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp)	this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp)	this_cpu_generic_read(pcp)
 #endif
 
-/* Keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr raw_cpu_ptr
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val)	this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val)	this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val)	this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val)		this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val)	this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval)	this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+	this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
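The generic fallbacks above make the protection rules explicit: raw_cpu_generic_to_op() is a plain read-modify-write through raw_cpu_ptr(), while this_cpu_generic_to_op() brackets the same access with raw_local_irq_save()/raw_local_irq_restore() so the RMW is atomic with respect to interrupts on the local CPU. On an arch that defines none of the optional _1/_2/_4/_8 overrides, this_cpu_add() on an int therefore expands to approximately the following (hand-expanded sketch; the counter name is made up):

	DEFINE_PER_CPU(int, nr_events);		/* hypothetical counter */

	/* this_cpu_add(nr_events, 1) -> this_cpu_add_4() -> this_cpu_generic_to_op(): */
	do {
		unsigned long __flags;

		raw_local_irq_save(__flags);	/* irq-safe on the local CPU */
		*raw_cpu_ptr(&nr_events) += 1;
		raw_local_irq_restore(__flags);
	} while (0);

An arch with a single-instruction percpu add (such as x86's segment-prefixed add) overrides this_cpu_add_4() and avoids the irq save/restore entirely.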
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index dec01d6c3f80..cfd56046ecec 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -1,6 +1,40 @@
+/*
+ * linux/percpu-defs.h - basic definitions for percpu areas
+ *
+ * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
+ *
+ * This file is separate from linux/percpu.h to avoid cyclic inclusion
+ * dependency from arch header files.  Only to be included from
+ * asm/percpu.h.
+ *
+ * This file includes macros necessary to declare percpu sections and
+ * variables, and definitions of percpu accessors and operations.  It
+ * should provide enough percpu features to arch header files even when
+ * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
+ */
+
 #ifndef _LINUX_PERCPU_DEFS_H
 #define _LINUX_PERCPU_DEFS_H
 
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION "..first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
@@ -19,19 +53,6 @@
 	__attribute__((section(".discard"), unused))
 
 /*
- * Macro which verifies @ptr is a percpu pointer without evaluating
- * @ptr.  This is to be used in percpu accessors to verify that the
- * input parameter is a percpu pointer.
- *
- * + 0 is required in order to convert the pointer type from a
- * potential array type to a pointer to a single item of the array.
- */
-#define __verify_pcpu_ptr(ptr)	do {					\
-	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
-	(void)__vpp_verify;						\
-} while (0)
-
-/*
  * s390 and alpha modules require percpu variables to be defined as
  * weak to force the compiler to generate GOT based external
  * references for them.  This is necessary because percpu sections
@@ -164,4 +185,337 @@
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)
 #endif
 
+/*
+ * Accessors and operations.
+ */
+#ifndef __ASSEMBLY__
+
+/*
+ * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
+ * @ptr and is invoked once before a percpu area is accessed by all
+ * accessors and operations.  This is performed in the generic part of
+ * percpu and arch overrides don't need to worry about it; however, if an
+ * arch wants to implement an arch-specific percpu accessor or operation,
+ * it may use __verify_pcpu_ptr() to verify the parameters.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
+ */
+#define __verify_pcpu_ptr(ptr)						\
+do {									\
+	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
+	(void)__vpp_verify;						\
+} while (0)
+
+#ifdef CONFIG_SMP
+
+/*
+ * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
+ * to prevent the compiler from making incorrect assumptions about the
+ * pointer value.  The weird cast keeps both GCC and sparse happy.
+ */
+#define SHIFT_PERCPU_PTR(__p, __offset)					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
+})
+
+#define raw_cpu_ptr(ptr)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	arch_raw_cpu_ptr(ptr);						\
+})
+
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr)						\
+({									\
+	__verify_pcpu_ptr(ptr);						\
+	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
+})
+#else
+#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
+#endif
+
+#else	/* CONFIG_SMP */
+
+#define VERIFY_PERCPU_PTR(__p)						\
+({									\
+	__verify_pcpu_ptr(__p);						\
+	(typeof(*(__p)) __kernel __force *)(__p);			\
+})
+
+#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
+#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
+#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
+
+#endif	/* CONFIG_SMP */
+
+#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
+#define __raw_get_cpu_var(var)	(*raw_cpu_ptr(&(var)))
+#define __get_cpu_var(var)	(*this_cpu_ptr(&(var)))
+
+/* keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
+
+/*
+ * Must be an lvalue.  Since @var must be a simple identifier,
+ * we force a syntax error here if it isn't.
+ */
+#define get_cpu_var(var)						\
+(*({									\
+	preempt_disable();						\
+	this_cpu_ptr(&var);						\
+}))
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var)						\
+do {									\
+	(void)&(var);							\
+	preempt_enable();						\
+} while (0)
+
+#define get_cpu_ptr(var)						\
+({									\
+	preempt_disable();						\
+	this_cpu_ptr(var);						\
+})
+
+#define put_cpu_ptr(var)						\
+do {									\
+	(void)(var);							\
+	preempt_enable();						\
+} while (0)
+
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
+#define __pcpu_size_call_return(stem, variable)				\
+({									\
+	typeof(variable) pscr_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr_ret__ = stem##1(variable); break;			\
+	case 2: pscr_ret__ = stem##2(variable); break;			\
+	case 4: pscr_ret__ = stem##4(variable); break;			\
+	case 8: pscr_ret__ = stem##8(variable); break;			\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr_ret__;							\
+})
+
+#define __pcpu_size_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
+({									\
+	bool pdcrb_ret__;						\
+	__verify_pcpu_ptr(&(pcp1));					\
+	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
+	VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));	\
+	VM_BUG_ON((unsigned long)(&(pcp2)) !=				\
+		  (unsigned long)(&(pcp1)) + sizeof(pcp1));		\
+	switch(sizeof(pcp1)) {						\
+	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
+	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pdcrb_ret__;							\
+})
+
+#define __pcpu_size_call(stem, variable, ...)				\
+do {									\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+		case 1: stem##1(variable, __VA_ARGS__);break;		\
+		case 2: stem##2(variable, __VA_ARGS__);break;		\
+		case 4: stem##4(variable, __VA_ARGS__);break;		\
+		case 8: stem##8(variable, __VA_ARGS__);break;		\
+		default:						\
+			__bad_size_call_parameter();break;		\
+	}								\
+} while (0)
+
+/*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables.
+ *
+ * These operation guarantee exclusivity of access for other operations
+ * on the *same* processor.  The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The arch code can provide optimized implementation by defining macros
+ * for certain scalar sizes.  F.e. provide this_cpu_add_2() to provide per
+ * cpu atomic operations for 2 byte sized RMW actions.  If arch code does
+ * not provide operations for a scalar size then the fallback in the
+ * generic code will be used.
+ *
+ * cmpxchg_double replaces two adjacent scalars at once.  The first two
+ * parameters are per cpu variables which have to be of the same size.  A
+ * truth value is returned to indicate success or failure (since a double
+ * register result is difficult to handle).  There is very limited hardware
+ * support for these operations, so only certain sizes may work.
+ */
+
+/*
+ * Operations for contexts where we do not want to do any checks for
+ * preemptions.  Unless strictly necessary, always use [__]this_cpu_*()
+ * instead.
+ *
+ * If there is no other protection through preempt disable and/or disabling
+ * interupts then one of these RMW operations can show unexpected behavior
+ * because the execution thread was rescheduled on another processor or an
+ * interrupt occurred and the same percpu variable was modified from the
+ * interrupt context.
+ */
+#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
+#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
+#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
+#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
+#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
+#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
+#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
+#define raw_cpu_cmpxchg(pcp, oval, nval) \
+	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
+#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
+#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
+#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
+
+/*
+ * Operations for contexts that are safe from preemption/interrupts.  These
+ * operations verify that preemption is disabled.
+ */
+#define __this_cpu_read(pcp)						\
+({									\
+	__this_cpu_preempt_check("read");				\
+	raw_cpu_read(pcp);						\
+})
+
+#define __this_cpu_write(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("write");				\
+	raw_cpu_write(pcp, val);					\
+})
+
+#define __this_cpu_add(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("add");				\
+	raw_cpu_add(pcp, val);						\
+})
+
+#define __this_cpu_and(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("and");				\
+	raw_cpu_and(pcp, val);						\
+})
+
+#define __this_cpu_or(pcp, val)						\
+({									\
+	__this_cpu_preempt_check("or");					\
+	raw_cpu_or(pcp, val);						\
+})
+
+#define __this_cpu_add_return(pcp, val)					\
+({									\
+	__this_cpu_preempt_check("add_return");				\
+	raw_cpu_add_return(pcp, val);					\
+})
+
+#define __this_cpu_xchg(pcp, nval)					\
+({									\
+	__this_cpu_preempt_check("xchg");				\
+	raw_cpu_xchg(pcp, nval);					\
+})
+
+#define __this_cpu_cmpxchg(pcp, oval, nval)				\
+({									\
+	__this_cpu_preempt_check("cmpxchg");				\
+	raw_cpu_cmpxchg(pcp, oval, nval);				\
+})
+
+#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({	__this_cpu_preempt_check("cmpxchg_double");			\
+	raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2);	\
+})
+
+#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+/*
+ * Operations with implied preemption protection.  These operations can be
+ * used without worrying about preemption.  Note that interrupts may still
+ * occur while an operation is in progress and if the interrupt modifies
+ * the variable too then RMW actions may not be reliable.
+ */
+#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
+#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
+#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
+#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
+#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
+#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
+#define this_cpu_cmpxchg(pcp, oval, nval) \
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#endif /* __ASSEMBLY__ */
 #endif /* _LINUX_PERCPU_DEFS_H */
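With the accessors consolidated here, __verify_pcpu_ptr() runs once in each generic wrapper, and the three operation families differ only in how much protection they layer on top of the size dispatch. A short usage sketch (the counter is illustrative, not from this patch):

	DEFINE_PER_CPU(unsigned long, hits);

	void any_context(void)
	{
		this_cpu_inc(hits);	/* irq- and preempt-safe anywhere */
	}

	void preempt_off_context(void)
	{
		/*
		 * Caller has already disabled preemption; under
		 * CONFIG_DEBUG_PREEMPT this is verified by
		 * __this_cpu_preempt_check().
		 */
		__this_cpu_inc(hits);
	}

	void no_checks(void)
	{
		raw_cpu_inc(hits);	/* no verification, no protection */
	}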
+ */ +#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) +#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) +#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) +#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) +#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) +#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) +#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) +#define this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) +#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) +#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) +#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) + +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 5d8920e23073..3dfbf237cd8f 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -57,11 +57,9 @@ struct percpu_ref { atomic_t count; /* * The low bit of the pointer indicates whether the ref is in percpu - * mode; if set, then get/put will manipulate the atomic_t (this is a - * hack because we need to keep the pointer around for - * percpu_ref_kill_rcu()) + * mode; if set, then get/put will manipulate the atomic_t. */ - unsigned __percpu *pcpu_count; + unsigned long pcpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct rcu_head rcu; @@ -69,7 +67,8 @@ struct percpu_ref { int __must_check percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release); -void percpu_ref_cancel_init(struct percpu_ref *ref); +void percpu_ref_reinit(struct percpu_ref *ref); +void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); @@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) return percpu_ref_kill_and_confirm(ref, NULL); } -#define PCPU_STATUS_BITS 2 -#define PCPU_STATUS_MASK ((1 << PCPU_STATUS_BITS) - 1) -#define PCPU_REF_PTR 0 #define PCPU_REF_DEAD 1 -#define REF_STATUS(count) (((unsigned long) count) & PCPU_STATUS_MASK) +/* + * Internal helper. Don't use outside percpu-refcount proper. The + * function doesn't return the pointer and let the caller test it for NULL + * because doing so forces the compiler to generate two conditional + * branches as it can't assume that @ref->pcpu_count is not NULL. + */ +static inline bool __pcpu_ref_alive(struct percpu_ref *ref, + unsigned __percpu **pcpu_countp) +{ + unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); + + /* paired with smp_store_release() in percpu_ref_reinit() */ + smp_read_barrier_depends(); + + if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) + return false; + + *pcpu_countp = (unsigned __percpu *)pcpu_ptr; + return true; +} /** |
