From e5e16d8f3ec6973af2068897786be619cf97860e Mon Sep 17 00:00:00 2001 From: Boqun Feng Date: Mon, 7 Sep 2015 07:58:00 +0800 Subject: powerpc: Kconfig: remove BE-only platforms from LE kernel build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, little endian is only supported on powernv and pseries, however, Kconfigs still allow us to include other platforms in a LE kernel, this may result in space wasting or even build error if some BE-only platforms always assume they are built for a BE kernel. So just modify the Kconfigs of BE-only platforms to remove them from being built for a LE kernel. For 32bit only platforms, nothing needs to be done, because CPU_LITTLE_ENDIAN depends on PPC64. For 64bit supported platforms, add CPU_BIG_ENDIAN to dependencies explicitly, so that these platforms will be disabled for LE [Suggested-by: Cédric Le Goater ]. Signed-off-by: Boqun Feng Acked-by: Geoff Levand Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/cell/Kconfig | 4 ++-- arch/powerpc/platforms/maple/Kconfig | 2 +- arch/powerpc/platforms/pasemi/Kconfig | 2 +- arch/powerpc/platforms/powermac/Kconfig | 2 +- arch/powerpc/platforms/ps3/Kconfig | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index b0ac1773cea6..429fc59d2a47 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -25,7 +25,7 @@ config PPC_CELL_NATIVE config PPC_IBM_CELL_BLADE bool "IBM Cell Blade" - depends on PPC64 && PPC_BOOK3S + depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN select PPC_CELL_NATIVE select PPC_OF_PLATFORM_PCI select PCI @@ -35,7 +35,7 @@ config PPC_IBM_CELL_BLADE config PPC_CELL_QPACE bool "IBM Cell - QPACE" - depends on PPC64 && PPC_BOOK3S + depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN select PPC_CELL_COMMON config AXON_MSI diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig index 1ea621a94c3b..e359d0db092c 100644 --- a/arch/powerpc/platforms/maple/Kconfig +++ b/arch/powerpc/platforms/maple/Kconfig @@ -1,5 +1,5 @@ config PPC_MAPLE - depends on PPC64 && PPC_BOOK3S + depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN bool "Maple 970FX Evaluation Board" select PCI select MPIC diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig index a2aeb327d185..00d4b28cbb60 100644 --- a/arch/powerpc/platforms/pasemi/Kconfig +++ b/arch/powerpc/platforms/pasemi/Kconfig @@ -1,5 +1,5 @@ config PPC_PASEMI - depends on PPC64 && PPC_BOOK3S + depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN bool "PA Semi SoC-based platforms" default n select MPIC diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig index 607124bae2e7..43c606268baf 100644 --- a/arch/powerpc/platforms/powermac/Kconfig +++ b/arch/powerpc/platforms/powermac/Kconfig @@ -1,6 +1,6 @@ config PPC_PMAC bool "Apple PowerMac based machines" - depends on PPC_BOOK3S + depends on PPC_BOOK3S && CPU_BIG_ENDIAN select MPIC select PCI select PPC_INDIRECT_PCI if PPC32 diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 56f274064d6c..b27f40f26efc 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -1,6 +1,6 @@ config PPC_PS3 bool "Sony PS3" - depends on PPC64 && PPC_BOOK3S + depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN select PPC_CELL select USB_OHCI_LITTLE_ENDIAN select 
USB_OHCI_BIG_ENDIAN_MMIO -- cgit v1.2.3 From 336382c78b926af7f3b22f738998962cec5a26e1 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Mon, 14 Sep 2015 14:36:35 -0700 Subject: powerpc/ps3: Refresh ps3_defconfig Refresh and remove obsolete CONFIG_EXT3_FS. Signed-off-by: Geoff Levand Signed-off-by: Michael Ellerman --- arch/powerpc/configs/ps3_defconfig | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index adc14e813a49..683b4c301feb 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -53,7 +53,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_IPV6=y CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -141,8 +140,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PS3=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=m -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT4_FS=y CONFIG_QUOTA=y CONFIG_QFMT_V2=y @@ -175,9 +172,7 @@ CONFIG_DEBUG_LOCKDEP=y CONFIG_DEBUG_LIST=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -- cgit v1.2.3 From 787b393c9f6300c343600d39f53f1b9f09d3684f Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 7 Aug 2015 13:05:42 +1000 Subject: powerpc/vdso: Emit GNU & SysV hashes Andy Lutomirski says: Some dynamic loaders may be slightly faster if a GNU hash is available. This is unlikely to have any measurable effect on the time it takes to resolve vdso symbols (since there are so few of them). In some contexts, it can be a win for a different reason: if every DSO has a GNU hash section, then libc can avoid calculating SysV hashes at all. Both musl and glibc appear to have this optimization. Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/vdso32/Makefile | 2 +- arch/powerpc/kernel/vdso64/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 53e6c9b979ec..6abffb7a8cd9 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -18,7 +18,7 @@ GCOV_PROFILE := n ccflags-y := -shared -fno-common -fno-builtin ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) asflags-y := -D__VDSO32__ -s obj-y += vdso32_wrapper.o diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index effca9404b17..8c8f2ae43935 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -11,7 +11,7 @@ GCOV_PROFILE := n ccflags-y := -shared -fno-common -fno-builtin ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=both) asflags-y := -D__VDSO64__ -s obj-y += vdso64_wrapper.o -- cgit v1.2.3 From 1d15010c349a26640e8f24959390bcf2ebd1db60 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 13 Aug 2015 17:07:54 +1000 Subject: powerpc/slb: Define an enum for the bolted indexes This patch defines macros for the three bolted SLB indexes we use. Switch the functions that take the indexes as an argument to use the enum. 
Signed-off-by: Anshuman Khandual Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slb.c | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 8a32a2be3c53..0c7115fd314b 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -25,6 +25,11 @@ #include #include +enum slb_index { + LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ + VMALLOC_INDEX = 1, /* Kernel virtual map (0xd000000000000000) */ + KSTACK_INDEX = 2, /* Kernel stack map */ +}; extern void slb_allocate_realmode(unsigned long ea); extern void slb_allocate_user(unsigned long ea); @@ -41,9 +46,9 @@ static void slb_allocate(unsigned long ea) (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T) static inline unsigned long mk_esid_data(unsigned long ea, int ssize, - unsigned long entry) + enum slb_index index) { - return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry; + return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; } static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, @@ -55,39 +60,39 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, static inline void slb_shadow_update(unsigned long ea, int ssize, unsigned long flags, - unsigned long entry) + enum slb_index index) { /* * Clear the ESID first so the entry is not valid while we are * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. */ - get_slb_shadow()->save_area[entry].esid = 0; - get_slb_shadow()->save_area[entry].vsid = + get_slb_shadow()->save_area[index].esid = 0; + get_slb_shadow()->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); - get_slb_shadow()->save_area[entry].esid = - cpu_to_be64(mk_esid_data(ea, ssize, entry)); + get_slb_shadow()->save_area[index].esid = + cpu_to_be64(mk_esid_data(ea, ssize, index)); } -static inline void slb_shadow_clear(unsigned long entry) +static inline void slb_shadow_clear(enum slb_index index) { - get_slb_shadow()->save_area[entry].esid = 0; + get_slb_shadow()->save_area[index].esid = 0; } static inline void create_shadowed_slbe(unsigned long ea, int ssize, unsigned long flags, - unsigned long entry) + enum slb_index index) { /* * Updating the shadow buffer before writing the SLB ensures * we don't get a stale entry here if we get preempted by PHYP * between these two statements. 
*/ - slb_shadow_update(ea, ssize, flags, entry); + slb_shadow_update(ea, ssize, flags, index); asm volatile("slbmte %0,%1" : : "r" (mk_vsid_data(ea, ssize, flags)), - "r" (mk_esid_data(ea, ssize, entry)) + "r" (mk_esid_data(ea, ssize, index)) : "memory" ); } @@ -103,16 +108,16 @@ static void __slb_flush_and_rebolt(void) lflags = SLB_VSID_KERNEL | linear_llp; vflags = SLB_VSID_KERNEL | vmalloc_llp; - ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2); + ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX); if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) { ksp_esid_data &= ~SLB_ESID_V; ksp_vsid_data = 0; - slb_shadow_clear(2); + slb_shadow_clear(KSTACK_INDEX); } else { /* Update stack entry; others don't change */ - slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); + slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX); ksp_vsid_data = - be64_to_cpu(get_slb_shadow()->save_area[2].vsid); + be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid); } /* We need to do this all in asm, so we're sure we don't touch @@ -151,7 +156,7 @@ void slb_vmalloc_update(void) unsigned long vflags; vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp; - slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1); + slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX); slb_flush_and_rebolt(); } @@ -326,19 +331,19 @@ void slb_initialize(void) asm volatile("isync":::"memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("isync; slbia; isync":::"memory"); - create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0); - create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1); + create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX); + create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX); /* For the boot cpu, we're running on the stack in init_thread_union, * which is in the first segment of the linear mapping, and also * get_paca()->kstack hasn't been initialized yet. * For secondary cpus, we need to bolt the kernel stack entry now. */ - slb_shadow_clear(2); + slb_shadow_clear(KSTACK_INDEX); if (raw_smp_processor_id() != boot_cpuid && (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) create_shadowed_slbe(get_paca()->kstack, - mmu_kernel_ssize, lflags, 2); + mmu_kernel_ssize, lflags, KSTACK_INDEX); asm volatile("isync":::"memory"); } -- cgit v1.2.3 From 26cd835ef8bdc9ca6db03374372738176c36dd88 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 13 Aug 2015 17:11:18 +1000 Subject: powerpc/slb: Use a local to avoid multiple calls to get_slb_shadow() For no reason other than it looks ugly. Signed-off-by: Michael Ellerman --- arch/powerpc/mm/slb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 0c7115fd314b..515730e499fe 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -62,16 +62,16 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, unsigned long flags, enum slb_index index) { + struct slb_shadow *p = get_slb_shadow(); + /* * Clear the ESID first so the entry is not valid while we are * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. 
*/ - get_slb_shadow()->save_area[index].esid = 0; - get_slb_shadow()->save_area[index].vsid = - cpu_to_be64(mk_vsid_data(ea, ssize, flags)); - get_slb_shadow()->save_area[index].esid = - cpu_to_be64(mk_esid_data(ea, ssize, index)); + p->save_area[index].esid = 0; + p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); + p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); } static inline void slb_shadow_clear(enum slb_index index) -- cgit v1.2.3 From c974809a26a13e40254dbe3cf46f49aa32acca11 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Fri, 25 Sep 2015 14:01:40 +1000 Subject: powerpc/vdso: Avoid link stack corruption in __get_datapage() powerpc has a link register (lr) used for calling functions. We "bl " to call a function, and "blr" to return back to the call site. The lr is only a single register, so if we call another function from inside this function (ie. nested calls), software must save away the lr on the software stack before calling the new function. Before returning (ie. before the "blr"), the lr is restored by software from the software stack. This makes branch prediction quite difficult for the processor as it will only know the branch target just before the "blr". To help with this, modern powerpc processors keep a (non-architected) hardware stack of lr called a "link stack". When a "bl " is run, the lr is pushed onto this stack. When a "blr" is called, the branch predictor pops the lr value from the top of the link stack, and uses it to predict the branch target. Hence the processor pipeline knows a lot earlier the branch target. This works great but there are some cases where you call "bl" but without a matching "blr". Once such case is when trying to determine the program counter (which can't be read directly). Here you "bl+4; mflr" to get the program counter. If you do this, the link stack will get out of sync with reality, causing the branch predictor to mis-predict subsequent function returns. To avoid this, modern micro-architectures have a special case of bl. Using the form "bcl 20,31,+4", ensures the processor doesn't push to the link stack. The 32 and 64 bit variants of __get_datapage() use a "bl; mflr" to determine the loaded address of the VDSO. The current versions of these attempt to use this special bl variant. Unfortunately they use +8 rather than the required +4. Hence the current code results in the link stack getting out of sync with reality and hence the resulting performance degradation. This patch moves it to bcl+4 by moving __kernel_datapage_offset out of __get_datapage(). With this patch, running a gettimeofday() (which uses __get_datapage()) microbenchmark we get a decent bump in performance on POWER7/8. 
For the benchmark in tools/testing/selftests/powerpc/benchmarks/gettimeofday.c POWER8: 64bit gets ~4% improvement 32bit gets ~9% improvement POWER7: 64bit gets ~7% improvement Signed-off-by: Michael Neuling Reported-by: Aaron Sawdey Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/vdso32/datapage.S | 12 +++++++----- arch/powerpc/kernel/vdso64/datapage.S | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index dc21e891d2e7..59cf5f452879 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -16,6 +16,10 @@ #include .text + .global __kernel_datapage_offset; +__kernel_datapage_offset: + .long 0 + V_FUNCTION_BEGIN(__get_datapage) .cfi_startproc /* We don't want that exposed or overridable as we want other objects @@ -27,13 +31,11 @@ V_FUNCTION_BEGIN(__get_datapage) mflr r0 .cfi_register lr,r0 - bcl 20,31,1f - .global __kernel_datapage_offset; -__kernel_datapage_offset: - .long 0 -1: + bcl 20,31,data_page_branch +data_page_branch: mflr r3 mtlr r0 + addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) add r3,r0,r3 blr diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index 79796de11737..2f01c4a0d8a0 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -16,6 +16,10 @@ #include .text +.global __kernel_datapage_offset; +__kernel_datapage_offset: + .long 0 + V_FUNCTION_BEGIN(__get_datapage) .cfi_startproc /* We don't want that exposed or overridable as we want other objects @@ -27,13 +31,11 @@ V_FUNCTION_BEGIN(__get_datapage) mflr r0 .cfi_register lr,r0 - bcl 20,31,1f - .global __kernel_datapage_offset; -__kernel_datapage_offset: - .long 0 -1: + bcl 20,31,data_page_branch +data_page_branch: mflr r3 mtlr r0 + addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) add r3,r0,r3 blr -- cgit v1.2.3 From 65d3223a853ac8598694064c1d37b0955e7d99cc Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 3 Sep 2015 13:20:56 +0530 Subject: powerpc/mm: Add virt_to_pfn and use this instead of opencoding This add helper virt_to_pfn and remove the opencoded usage of the same. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/page.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 71294a6e976e..168ca67e39b3 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -127,9 +127,10 @@ extern long long virt_phys_offset; #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) #endif -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* * On Book-E parts we need __va to parse the device tree and we can't -- cgit v1.2.3 From 2adc48a691866fbb3134dd3abd77647a8b5a9307 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 23 Sep 2015 15:40:35 +1000 Subject: powerpc: Add ppc64le_defconfig Based directly on ppc64_defconfig using merge_config. 
Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index b9b4af2af9a5..3704db45a832 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -288,6 +288,10 @@ PHONY += pseries_le_defconfig pseries_le_defconfig: $(call merge_into_defconfig,pseries_defconfig,le) +PHONY += ppc64le_defconfig +ppc64le_defconfig: + $(call merge_into_defconfig,ppc64_defconfig,le) + PHONY += mpc85xx_defconfig mpc85xx_defconfig: $(call merge_into_defconfig,mpc85xx_basic_defconfig,\ -- cgit v1.2.3 From 7d523187173294f6ae3b86a48e442122b1aecc38 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Fri, 17 Jul 2015 09:19:59 +0200 Subject: powerpc/nvram: Add missing kfree in error path If 'nvram_write_header' fails, then 'new_part' should be freed, otherwise, there is a memory leak. Signed-off-by: Christophe Jaillet Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/nvram_64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 98ba106a59ef..72858e926e0a 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -1079,6 +1079,7 @@ loff_t __init nvram_create_partition(const char *name, int sig, if (rc <= 0) { pr_err("nvram_create_os_partition: nvram_write_header " "failed (%d)\n", rc); + kfree(new_part); return rc; } list_add_tail(&new_part->partition, &free_part->partition); -- cgit v1.2.3 From b6080db4f4e8bf28717b832976efc421de25b86c Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Fri, 17 Jul 2015 09:20:00 +0200 Subject: powerpc/nvram: Fix function name in some errors messages. 'nvram_create_os_partition' should be 'nvram_create_partition'. Use __func__ to have it right, as done elsewhere in this file. 
Signed-off-by: Christophe Jaillet Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/nvram_64.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 72858e926e0a..32e26526f7e4 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -1065,7 +1065,7 @@ loff_t __init nvram_create_partition(const char *name, int sig, /* Create our OS partition */ new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); if (!new_part) { - pr_err("nvram_create_os_partition: kmalloc failed\n"); + pr_err("%s: kmalloc failed\n", __func__); return -ENOMEM; } @@ -1077,8 +1077,7 @@ loff_t __init nvram_create_partition(const char *name, int sig, rc = nvram_write_header(new_part); if (rc <= 0) { - pr_err("nvram_create_os_partition: nvram_write_header " - "failed (%d)\n", rc); + pr_err("%s: nvram_write_header failed (%d)\n", __func__, rc); kfree(new_part); return rc; } @@ -1091,8 +1090,8 @@ loff_t __init nvram_create_partition(const char *name, int sig, free_part->header.checksum = nvram_checksum(&free_part->header); rc = nvram_write_header(free_part); if (rc <= 0) { - pr_err("nvram_create_os_partition: nvram_write_header " - "failed (%d)\n", rc); + pr_err("%s: nvram_write_header failed (%d)\n", + __func__, rc); return rc; } } else { @@ -1106,11 +1105,12 @@ loff_t __init nvram_create_partition(const char *name, int sig, tmp_index += NVRAM_BLOCK_LEN) { rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index); if (rc <= 0) { - pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc); + pr_err("%s: nvram_write failed (%d)\n", + __func__, rc); return rc; } } - + return new_part->index + NVRAM_HEADER_LEN; } -- cgit v1.2.3 From 948ad1acaf456b7213731cd9eb58654930335070 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 1 Oct 2015 12:46:06 +0300 Subject: powerpc/pseries: extract of_helpers module Extract a new module to share the code between other modules. There is no functional change. 
Signed-off-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/Makefile | 1 + arch/powerpc/platforms/pseries/of_helpers.c | 38 +++++++++++++++++++++++++++++ arch/powerpc/platforms/pseries/of_helpers.h | 8 ++++++ arch/powerpc/platforms/pseries/reconfig.c | 34 ++------------------------ 4 files changed, 49 insertions(+), 32 deletions(-) create mode 100644 arch/powerpc/platforms/pseries/of_helpers.c create mode 100644 arch/powerpc/platforms/pseries/of_helpers.h (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 03480796af9a..2e857c2b6ea3 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -2,6 +2,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ + of_helpers.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o rng.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c new file mode 100644 index 000000000000..1cbd89614484 --- /dev/null +++ b/arch/powerpc/platforms/pseries/of_helpers.c @@ -0,0 +1,38 @@ +#include +#include +#include +#include + +#include "of_helpers.h" + +/** + * pseries_of_derive_parent - basically like dirname(1) + * @path: the full_name of a node to be added to the tree + * + * Returns the node which should be the parent of the node + * described by path. E.g., for path = "/foo/bar", returns + * the node with full_name = "/foo". + */ +struct device_node *pseries_of_derive_parent(const char *path) +{ + struct device_node *parent = NULL; + char *parent_path = "/"; + size_t parent_path_len = strrchr(path, '/') - path + 1; + + /* reject if path is "/" */ + if (!strcmp(path, "/")) + return ERR_PTR(-EINVAL); + + if (strrchr(path, '/') != path) { + parent_path = kmalloc(parent_path_len, GFP_KERNEL); + if (!parent_path) + return ERR_PTR(-ENOMEM); + strlcpy(parent_path, path, parent_path_len); + } + parent = of_find_node_by_path(parent_path); + if (!parent) + return ERR_PTR(-EINVAL); + if (strcmp(parent_path, "/")) + kfree(parent_path); + return parent; +} diff --git a/arch/powerpc/platforms/pseries/of_helpers.h b/arch/powerpc/platforms/pseries/of_helpers.h new file mode 100644 index 000000000000..bb83d39aef65 --- /dev/null +++ b/arch/powerpc/platforms/pseries/of_helpers.h @@ -0,0 +1,8 @@ +#ifndef _PSERIES_OF_HELPERS_H +#define _PSERIES_OF_HELPERS_H + +#include + +struct device_node *pseries_of_derive_parent(const char *path); + +#endif /* _PSERIES_OF_HELPERS_H */ diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 0f319521e002..7c7fcc042549 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -22,37 +22,7 @@ #include #include -/** - * derive_parent - basically like dirname(1) - * @path: the full_name of a node to be added to the tree - * - * Returns the node which should be the parent of the node - * described by path. E.g., for path = "/foo/bar", returns - * the node with full_name = "/foo". 
- */ -static struct device_node *derive_parent(const char *path) -{ - struct device_node *parent = NULL; - char *parent_path = "/"; - size_t parent_path_len = strrchr(path, '/') - path + 1; - - /* reject if path is "/" */ - if (!strcmp(path, "/")) - return ERR_PTR(-EINVAL); - - if (strrchr(path, '/') != path) { - parent_path = kmalloc(parent_path_len, GFP_KERNEL); - if (!parent_path) - return ERR_PTR(-ENOMEM); - strlcpy(parent_path, path, parent_path_len); - } - parent = of_find_node_by_path(parent_path); - if (!parent) - return ERR_PTR(-EINVAL); - if (strcmp(parent_path, "/")) - kfree(parent_path); - return parent; -} +#include "of_helpers.h" static int pSeries_reconfig_add_node(const char *path, struct property *proplist) { @@ -71,7 +41,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist of_node_set_flag(np, OF_DYNAMIC); of_node_init(np); - np->parent = derive_parent(path); + np->parent = pseries_of_derive_parent(path); if (IS_ERR(np->parent)) { err = PTR_ERR(np->parent); goto out_err; -- cgit v1.2.3 From dc85aaed64804205c476d06cebe4c94f66fd8a20 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 1 Oct 2015 12:46:07 +0300 Subject: powerpc/pseries: fix a potential memory leak In case we have a full node name like /foo/bar and /foo is not found the parent_path left unfreed. So, free a memory before return to a caller. Signed-off-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/of_helpers.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c index 1cbd89614484..2f363e3286d1 100644 --- a/arch/powerpc/platforms/pseries/of_helpers.c +++ b/arch/powerpc/platforms/pseries/of_helpers.c @@ -15,7 +15,7 @@ */ struct device_node *pseries_of_derive_parent(const char *path) { - struct device_node *parent = NULL; + struct device_node *parent; char *parent_path = "/"; size_t parent_path_len = strrchr(path, '/') - path + 1; @@ -30,9 +30,7 @@ struct device_node *pseries_of_derive_parent(const char *path) strlcpy(parent_path, path, parent_path_len); } parent = of_find_node_by_path(parent_path); - if (!parent) - return ERR_PTR(-EINVAL); if (strcmp(parent_path, "/")) kfree(parent_path); - return parent; + return parent ? parent : ERR_PTR(-EINVAL); } -- cgit v1.2.3 From a030e1e4bbd085bbcfd0a23f8d355fcd41f39bed Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 1 Oct 2015 12:46:08 +0300 Subject: powerpc/pseries: replace kmalloc + strlcpy The helper kstrndup() will do the same in one line. 
Signed-off-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/of_helpers.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c index 2f363e3286d1..8c6b05ae1a1d 100644 --- a/arch/powerpc/platforms/pseries/of_helpers.c +++ b/arch/powerpc/platforms/pseries/of_helpers.c @@ -24,10 +24,9 @@ struct device_node *pseries_of_derive_parent(const char *path) return ERR_PTR(-EINVAL); if (strrchr(path, '/') != path) { - parent_path = kmalloc(parent_path_len, GFP_KERNEL); + parent_path = kstrndup(path, parent_path_len, GFP_KERNEL); if (!parent_path) return ERR_PTR(-ENOMEM); - strlcpy(parent_path, path, parent_path_len); } parent = of_find_node_by_path(parent_path); if (strcmp(parent_path, "/")) -- cgit v1.2.3 From a46d9884096c15ec17505b9b3d19f5e0672deff3 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 1 Oct 2015 12:46:09 +0300 Subject: powerpc/pseries: handle nodes without '/' In case we have node without '/' strrchr() returns NULL which might lead to crash. Replace strrchr() by kbasename() and modify condition to avoid such behaviour. Suggested-by: Segher Boessenkool Signed-off-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/of_helpers.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c index 8c6b05ae1a1d..4417afef74d4 100644 --- a/arch/powerpc/platforms/pseries/of_helpers.c +++ b/arch/powerpc/platforms/pseries/of_helpers.c @@ -17,14 +17,14 @@ struct device_node *pseries_of_derive_parent(const char *path) { struct device_node *parent; char *parent_path = "/"; - size_t parent_path_len = strrchr(path, '/') - path + 1; + const char *tail = kbasename(path); /* reject if path is "/" */ if (!strcmp(path, "/")) return ERR_PTR(-EINVAL); - if (strrchr(path, '/') != path) { - parent_path = kstrndup(path, parent_path_len, GFP_KERNEL); + if (tail > path + 1) { + parent_path = kstrndup(path, tail - path, GFP_KERNEL); if (!parent_path) return ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From 06bacefcbd5f91efb7ffedc17615fa188d6ce406 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 1 Oct 2015 12:46:10 +0300 Subject: powerpc/pseries: re-use code from of_helpers module The derive_parent() has similar semantics to what we have in newly introduced of_helpers module. The replacement reduces code base and propagates the actual error code to the caller. 
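To illustrate the behaviour the new check relies on, here is a minimal standalone C sketch (an aside, not the kernel code; kbasename() is re-implemented locally so the program compiles on its own). It prints, for a few example paths, the prefix that pseries_of_derive_parent() would kstrndup() and hand to of_find_node_by_path(), and shows why a path with no '/' no longer depends on a NULL strrchr() result:

#include <stdio.h>
#include <string.h>

/* Local stand-in with the same semantics as the kernel's kbasename(). */
static const char *kbasename(const char *path)
{
        const char *tail = strrchr(path, '/');
        return tail ? tail + 1 : path;
}

int main(void)
{
        const char *paths[] = { "/foo/bar", "/foo", "foo" };

        for (unsigned int i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
                const char *path = paths[i];
                const char *tail = kbasename(path);

                if (tail > path + 1)
                        /* prefix up to the basename, e.g. "/foo/" for "/foo/bar" */
                        printf("%-10s -> parent path \"%.*s\"\n",
                               path, (int)(tail - path), path);
                else
                        /* "/foo" and a bare "foo" both fall back to the root "/" */
                        printf("%-10s -> parent path \"/\"\n", path);
        }
        return 0;
}

With strrchr() alone, the bare "foo" case returned NULL and the subsequent pointer arithmetic was undefined; kbasename() returns the path itself in that case, so the tail > path + 1 test safely routes it to the "/" fallback.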
Signed-off-by: Andy Shevchenko Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/dlpar.c | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index db17827eb746..f244dcb4f2cf 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -18,6 +18,8 @@ #include #include #include + +#include "of_helpers.h" #include "offline_states.h" #include "pseries.h" @@ -244,36 +246,13 @@ cc_error: return first_dn; } -static struct device_node *derive_parent(const char *path) -{ - struct device_node *parent; - char *last_slash; - - last_slash = strrchr(path, '/'); - if (last_slash == path) { - parent = of_find_node_by_path("/"); - } else { - char *parent_path; - int parent_path_len = last_slash - path + 1; - parent_path = kmalloc(parent_path_len, GFP_KERNEL); - if (!parent_path) - return NULL; - - strlcpy(parent_path, path, parent_path_len); - parent = of_find_node_by_path(parent_path); - kfree(parent_path); - } - - return parent; -} - int dlpar_attach_node(struct device_node *dn) { int rc; - dn->parent = derive_parent(dn->full_name); - if (!dn->parent) - return -ENOMEM; + dn->parent = pseries_of_derive_parent(dn->full_name); + if (IS_ERR(dn->parent)) + return PTR_ERR(dn->parent); rc = of_attach_node(dn); if (rc) { -- cgit v1.2.3 From cb2d3883c6033831e2a93b396fcc43033108409c Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Wed, 16 Sep 2015 22:26:14 +0300 Subject: powerpc/msi: Free the bitmap if it was slab allocated During the MSI bitmap test on boot kmemleak spews the following trace: unreferenced object 0xc00000016e86c900 (size 64): comm "swapper/0", pid 1, jiffies 4294893173 (age 518.024s) hex dump (first 32 bytes): 00 00 01 ff 7f ff 7f 37 00 00 00 00 00 00 00 00 .......7........ ff ff ff ff ff ff ff ff 01 ff ff ff ff ff ff ff ................ backtrace: [] .zalloc_maybe_bootmem+0x3c/0x380 [] .msi_bitmap_alloc+0x3c/0xb0 [] .msi_bitmap_selftest+0x30/0x2b4 [] .do_one_initcall+0xd4/0x270 [] .kernel_init_freeable+0x1a0/0x280 [] .kernel_init+0x1c/0x120 [] .ret_from_kernel_thread+0x58/0x9c Add a flag to msi_bitmap for tracking allocations from slab and memblock so we can properly free/handle memory in msi_bitmap_free(). 
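The fix boils down to recording, at allocation time, which allocator produced the buffer and consulting that record in the free path. A condensed standalone C sketch of that pattern (illustrative names only, not the kernel API; calloc() and a static array stand in for the slab and memblock allocators):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct bitmap_buf {
        unsigned long *bitmap;
        bool from_slab;                 /* which allocator produced ->bitmap */
};

static unsigned long early_pool[64];    /* stands in for memblock memory */

static int bitmap_buf_alloc(struct bitmap_buf *b, size_t words,
                            bool slab_available)
{
        b->from_slab = slab_available;
        b->bitmap = slab_available ? calloc(words, sizeof(unsigned long))
                                   : early_pool;
        return b->bitmap ? 0 : -1;
}

static void bitmap_buf_free(struct bitmap_buf *b)
{
        /* only heap-backed (slab-like) memory may be handed back */
        if (b->from_slab)
                free(b->bitmap);
        b->bitmap = NULL;
}

int main(void)
{
        struct bitmap_buf early = { 0 }, late = { 0 };

        bitmap_buf_alloc(&early, 4, false);     /* boot time: memblock-like */
        bitmap_buf_alloc(&late, 4, true);       /* runtime: slab-like */
        bitmap_buf_free(&early);                /* deliberately a no-op */
        bitmap_buf_free(&late);                 /* really freed */
        printf("freed only the slab-backed bitmap\n");
        return 0;
}

The actual patch below does the same with slab_is_available(), kzalloc()/memblock_virt_alloc() and the new bitmap_from_slab flag, plus kmemleak_not_leak() for the memblock case.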
Signed-off-by: Denis Kirjanov Reviewed-by: Catalin Marinas [mpe: Reword changelog & use bitmap_from_slab in the if] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/msi_bitmap.h | 1 + arch/powerpc/sysdev/msi_bitmap.c | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/msi_bitmap.h b/arch/powerpc/include/asm/msi_bitmap.h index 97ac3f46ae0d..1ec7125551f1 100644 --- a/arch/powerpc/include/asm/msi_bitmap.h +++ b/arch/powerpc/include/asm/msi_bitmap.h @@ -19,6 +19,7 @@ struct msi_bitmap { unsigned long *bitmap; spinlock_t lock; unsigned int irq_count; + bool bitmap_from_slab; }; int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num); diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 73b64c73505b..1a826f3b4424 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -122,7 +123,15 @@ int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, size = BITS_TO_LONGS(irq_count) * sizeof(long); pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size); - bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL); + bmp->bitmap_from_slab = slab_is_available(); + if (bmp->bitmap_from_slab) + bmp->bitmap = kzalloc(size, GFP_KERNEL); + else { + bmp->bitmap = memblock_virt_alloc(size, 0); + /* the bitmap won't be freed from memblock allocator */ + kmemleak_not_leak(bmp->bitmap); + } + if (!bmp->bitmap) { pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n"); return -ENOMEM; @@ -138,7 +147,8 @@ int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, void msi_bitmap_free(struct msi_bitmap *bmp) { - /* we can't free the bitmap we don't know if it's bootmem etc. 
*/ + if (bmp->bitmap_from_slab) + kfree(bmp->bitmap); of_node_put(bmp->of_node); bmp->bitmap = NULL; } @@ -203,8 +213,6 @@ static void __init test_basics(void) /* Clients may WARN_ON bitmap == NULL for "not-allocated" */ WARN_ON(bmp.bitmap != NULL); - - kfree(bmp.bitmap); } static void __init test_of_node(void) -- cgit v1.2.3 From 255e8e046c3925ee45a869466221a48d38451b39 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 21 Aug 2015 13:05:15 +0200 Subject: powerpc/8xx: Shorten irq_chip name for the SIU show_interrupts() expects the irq_chip name to be max 8 characters otherwise everything get misaligned # cat /proc/interrupts CPU0 17: 0 CPM PIC 0 Level error 19: 0 MPC8XX SIU 15 Level tbint 20: 90 CPM PIC 4 Level cpm_uart 38: 29746 MPC8XX SIU 5 Level fs_enet-mac 39: 0 MPC8XX SIU 7 Level fs_enet-mac 47: 401 CPM PIC 5 Level fsl_spi 68: 1 MPC8XX SIU 2 Level phy_interrupt, phy_interrupt, phy_interrupt LOC: 7225485 Local timer interrupts for timer event device LOC: 9 Local timer interrupts for others SPU: 0 Spurious interrupts PMI: 0 Performance monitoring interrupts MCE: 0 Machine check exceptions Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman --- arch/powerpc/sysdev/mpc8xx_pic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index 9a423975853a..b7cf7abff2eb 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -61,7 +61,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) } static struct irq_chip mpc8xx_pic = { - .name = "MPC8XX SIU", + .name = "8XX SIU", .irq_unmask = mpc8xx_unmask_irq, .irq_mask = mpc8xx_mask_irq, .irq_ack = mpc8xx_ack, -- cgit v1.2.3 From 1b70386c99e997b359735c753ca1a27c6cf87847 Mon Sep 17 00:00:00 2001 From: Samuel Mendoza-Jonas Date: Wed, 22 Jul 2015 15:54:29 +1000 Subject: powerpc/kexec: Wait 1s for secondaries to enter OPAL Always include a timeout when waiting for secondary cpus to enter OPAL in the kexec path, rather than only when crashing. Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/setup.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 685b3cbe1362..a9a8fa37a555 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -187,7 +187,7 @@ static void pnv_kexec_wait_secondaries_down(void) for_each_online_cpu(i) { uint8_t status; - int64_t rc; + int64_t rc, timeout = 1000; if (i == my_cpu) continue; @@ -204,6 +204,18 @@ static void pnv_kexec_wait_secondaries_down(void) i, paca[i].hw_cpu_id); notified = i; } + + /* + * On crash secondaries might be unreachable or hung, + * so timeout if we've waited too long + * */ + mdelay(1); + if (timeout-- == 0) { + printk(KERN_ERR "kexec: timed out waiting for " + "cpu %d (physical %d) to enter OPAL\n", + i, paca[i].hw_cpu_id); + break; + } } } } @@ -225,13 +237,6 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) /* Return the CPU to OPAL */ opal_return_cpu(); - } else if (crash_shutdown) { - /* - * On crash, we don't wait for secondaries to go - * down as they might be unreachable or hung, so - * instead we just wait a bit and move on. 
- */ - mdelay(1); } else { /* Primary waits for the secondaries to have reached OPAL */ pnv_kexec_wait_secondaries_down(); -- cgit v1.2.3 From f78f7ed72632a0794161f290b89e2cd752f55202 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 8 Oct 2015 13:29:28 +0530 Subject: powerpc: Fix _ALIGN_* errors due to type difference. This avoid errors like unsigned int usize = 1 << 30; int size = 1 << 30; unsigned long addr = 64UL << 30 ; value = _ALIGN_DOWN(addr, usize); -> 0 value = _ALIGN_DOWN(addr, size); -> 0x1000000000 Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/boot/page.h | 4 ++-- arch/powerpc/include/asm/page.h | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/boot/page.h b/arch/powerpc/boot/page.h index 14eca30fef64..87c42d7d283d 100644 --- a/arch/powerpc/boot/page.h +++ b/arch/powerpc/boot/page.h @@ -22,8 +22,8 @@ #define PAGE_MASK (~(PAGE_SIZE-1)) /* align addr on a size boundary - adjust address up/down if needed */ -#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1))) -#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1))) +#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((typeof(addr))(size)-1))) +#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1))) /* align addr on a size boundary - adjust address up if needed */ #define _ALIGN(addr,size) _ALIGN_UP(addr,size) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 168ca67e39b3..73b58ff6c451 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include +#include #else #include #endif @@ -241,8 +242,8 @@ extern long long virt_phys_offset; #endif /* align addr on a size boundary - adjust address up/down if needed */ -#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1))) -#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1))) +#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size) +#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1))) /* align addr on a size boundary - adjust address up if needed */ #define _ALIGN(addr,size) _ALIGN_UP(addr,size) -- cgit v1.2.3 From c13e1c05b22b504bf0d72fc762a02be37df2d1b0 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 8 Oct 2015 20:00:58 +0100 Subject: powerpc/pseries/hvcserver: don't memset pi_buff if it is null pi_buff is being memset before it is sanity checked. Move the memset after the null pi_buff sanity check to avoid an oops. Signed-off-by: Colin Ian King Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/hvcserver.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index eedb64594dc5..94a6e5612b0d 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -142,11 +142,11 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, int more = 1; int retval; - memset(pi_buff, 0x00, PAGE_SIZE); /* invalid parameters */ if (!head || !pi_buff) return -EINVAL; + memset(pi_buff, 0x00, PAGE_SIZE); last_p_partition_ID = last_p_unit_address = ~0UL; INIT_LIST_HEAD(head); -- cgit v1.2.3 From ec2640b114d535ba7d895b6ee353791d542f2407 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 7 Sep 2015 12:53:53 +0530 Subject: powerpc/mm: Disable hugepd for 64K page size. 
After commit e2b3d202d1dba8f3546ed28224ce485bc50010be ("powerpc: Switch 16GB and 16MB explicit hugepages to a different page table format"), we don't need to support is_hugepd() for 64K page size. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/page.h | 15 +++++++++++++++ arch/powerpc/mm/hugetlbpage.c | 19 +++++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 73b58ff6c451..96534b4e5a64 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -364,6 +364,20 @@ typedef struct { signed long pd; } hugepd_t; #ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_64K_PAGES +/* + * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't + * need to setup hugepage directory for them. Our pte and page directory format + * enable us to have this enabled. But to avoid errors when implementing new + * features disable hugepd for 64K. We enable a debug version here, So we catch + * wrong usage. + */ +#ifdef CONFIG_DEBUG_VM +extern int hugepd_ok(hugepd_t hpd); +#else +#define hugepd_ok(x) (0) +#endif +#else static inline int hugepd_ok(hugepd_t hpd) { /* @@ -372,6 +386,7 @@ static inline int hugepd_ok(hugepd_t hpd) */ return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); } +#endif #else static inline int hugepd_ok(hugepd_t hpd) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 06c14523b787..f093828e8997 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -89,6 +89,25 @@ int pgd_huge(pgd_t pgd) */ return ((pgd_val(pgd) & 0x3) != 0x0); } + +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM) +/* + * This enables us to catch the wrong page directory format + * Moved here so that we can use WARN() in the call. + */ +int hugepd_ok(hugepd_t hpd) +{ + bool is_hugepd; + + /* + * We should not find this format in page directory, warn otherwise. + */ + is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); + WARN(is_hugepd, "Found wrong page directory format\n"); + return 0; +} +#endif + #else int pmd_huge(pmd_t pmd) { -- cgit v1.2.3 From 891121e6c02c6242487aa4ea1d5c75b7ecdc45ee Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 9 Oct 2015 08:32:21 +0530 Subject: powerpc/mm: Differentiate between hugetlb and THP during page walk We need to properly identify whether a hugepage is an explicit or a transparent hugepage in follow_huge_addr(). We used to depend on hugepage shift argument to do that. But in some case that can result in wrong results. For ex: On finding a transparent hugepage we set hugepage shift to PMD_SHIFT. But we can end up clearing the thp pte, via pmdp_huge_get_and_clear. We do prevent reusing the pfn page via the usage of kick_all_cpus_sync(). But that happens after we updated the pte to 0. Hence in follow_huge_addr() we can find hugepage shift set, but transparent huge page check fail for a thp pte. NOTE: We fixed a variant of this race against thp split in commit 691e95fd7396905a38d98919e9c150dbc3ea21a3 ("powerpc/mm/thp: Make page table walk safe against thp split/collapse") Without this patch, we may hit the BUG_ON(flags & FOLL_GET) in follow_page_mask occasionally. 
In the long term, we may want to switch ppc64 64k page size config to enable CONFIG_ARCH_WANT_GENERAL_HUGETLB Reported-by: David Gibson Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mmu-hash64.h | 1 + arch/powerpc/include/asm/pgtable-ppc64.h | 10 +++++++++- arch/powerpc/include/asm/pgtable.h | 6 +++--- arch/powerpc/kernel/eeh.c | 3 ++- arch/powerpc/kernel/io-workarounds.c | 2 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8 +++++--- arch/powerpc/kvm/e500_mmu_host.c | 2 +- arch/powerpc/mm/hash_utils_64.c | 7 ++++--- arch/powerpc/mm/hugetlbpage.c | 21 ++++++++++++++++----- arch/powerpc/mm/tlb_hash64.c | 9 +++++---- arch/powerpc/perf/callchain.c | 2 +- 12 files changed, 49 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index a82f5347540a..ba3342bbdbda 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -14,6 +14,7 @@ #include #include +#include /* * This is necessary to get the definition of PGTABLE_RANGE which we diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index fa1dfb7f7b48..3245f2d96d4f 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -437,9 +437,9 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp) } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, unsigned long old_pmd); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); @@ -479,6 +479,14 @@ static inline int pmd_trans_splitting(pmd_t pmd) } extern int has_transparent_hugepage(void); +#else +static inline void hpte_do_hugepage_flush(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp, + unsigned long old_pmd) +{ + + WARN(1, "%s called with THP disabled\n", __func__); +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_large(pmd_t pmd) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 0717693c8428..b64b4212b71f 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -259,15 +259,15 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, #define has_transparent_hugepage() 0 #endif pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - unsigned *shift); + bool *is_thp, unsigned *shift); static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - unsigned *shift) + bool *is_thp, unsigned *shift) { if (!arch_irqs_disabled()) { pr_info("%s called with irq enabled\n", __func__); dump_stack(); } - return __find_linux_pte_or_hugepte(pgdir, ea, shift); + return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift); } #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index e968533e3e05..00ba5de12256 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -351,7 +351,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) * worried about _PAGE_SPLITTING/collapse. Also we will not hit * page table free, because of init_mm. 
*/ - ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift); + ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, + NULL, &hugepage_shift); if (!ptep) return token; WARN_ON(hugepage_shift); diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 63d9cc4d7366..5f8613ceb97f 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -76,7 +76,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr) * a page table free due to init_mm */ ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, - &hugepage_shift); + NULL, &hugepage_shift); if (ptep == NULL) paddr = 0; else { diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 1f9c0a17f445..3fc2ba784a71 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -543,7 +543,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, */ local_irq_save(flags); ptep = find_linux_pte_or_hugepte(current->mm->pgd, - hva, NULL); + hva, NULL, NULL); if (ptep) { pte = kvmppc_read_update_linux_pte(ptep, 1); if (pte_write(pte)) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index c1df9bb1e413..0bce4fffcb2e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -32,7 +32,7 @@ static void *real_vmalloc_addr(void *x) * So don't worry about THP collapse/split. Called * Only in realmode, hence won't need irq_save/restore. */ - p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL); + p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL); if (!p || !pte_present(*p)) return NULL; addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK); @@ -221,10 +221,12 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, * retry via mmu_notifier_retry. */ if (realmode) - ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift); + ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL, + &hpage_shift); else { local_irq_save(irq_flags); - ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift); + ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, + &hpage_shift); } if (ptep) { pte_t pte; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 4d33e199edcc..805fee9beefa 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -476,7 +476,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, * can't run hence pfn won't change. 
*/ local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL); + ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL); if (ptep) { pte_t pte = READ_ONCE(*ptep); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index aee70171355b..7f9616f7c479 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -994,6 +994,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap, unsigned long flags) { + bool is_thp; enum ctx_state prev_state = exception_enter(); pgd_t *pgdir; unsigned long vsid; @@ -1068,7 +1069,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, #endif /* CONFIG_PPC_64K_PAGES */ /* Get PTE and page size from page tables */ - ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); + ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); rc = 1; @@ -1088,7 +1089,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, } if (hugeshift) { - if (pmd_trans_huge(*(pmd_t *)ptep)) + if (is_thp) rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, trap, flags, ssize, psize); #ifdef CONFIG_HUGETLB_PAGE @@ -1243,7 +1244,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, * THP pages use update_mmu_cache_pmd. We don't do * hash preload there. Hence can ignore THP here */ - ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift); + ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift); if (!ptep) goto out_exit; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index f093828e8997..9833fee493ec 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -128,7 +128,7 @@ int pgd_huge(pgd_t pgd) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { /* Only called for hugetlbfs pages, hence can ignore THP */ - return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL); + return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL); } static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, @@ -703,13 +703,14 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, struct page * follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { + bool is_thp; pte_t *ptep, pte; unsigned shift; unsigned long mask, flags; struct page *page = ERR_PTR(-EINVAL); local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); + ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift); if (!ptep) goto no_page; pte = READ_ONCE(*ptep); @@ -718,7 +719,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) * Transparent hugepages are handled by generic code. We can skip them * here. 
*/ - if (!shift || pmd_trans_huge(__pmd(pte_val(pte)))) + if (!shift || is_thp) goto no_page; if (!pte_present(pte)) { @@ -975,7 +976,7 @@ void flush_dcache_icache_hugepage(struct page *page) */ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, - unsigned *shift) + bool *is_thp, unsigned *shift) { pgd_t pgd, *pgdp; pud_t pud, *pudp; @@ -987,6 +988,9 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, if (shift) *shift = 0; + if (is_thp) + *is_thp = false; + pgdp = pgdir + pgd_index(ea); pgd = READ_ONCE(*pgdp); /* @@ -1034,7 +1038,14 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, if (pmd_none(pmd)) return NULL; - if (pmd_huge(pmd) || pmd_large(pmd)) { + if (pmd_trans_huge(pmd)) { + if (is_thp) + *is_thp = true; + ret_pte = (pte_t *) pmdp; + goto out; + } + + if (pmd_huge(pmd)) { ret_pte = (pte_t *) pmdp; goto out; } else if (is_hugepd(__hugepd(pmd_val(pmd)))) diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index c522969f012d..f7b80391bee7 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -190,6 +190,7 @@ void tlb_flush(struct mmu_gather *tlb) void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, unsigned long end) { + bool is_thp; int hugepage_shift; unsigned long flags; @@ -208,21 +209,21 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, local_irq_save(flags); arch_enter_lazy_mmu_mode(); for (; start < end; start += PAGE_SIZE) { - pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, + pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp, &hugepage_shift); unsigned long pte; if (ptep == NULL) continue; pte = pte_val(*ptep); - if (hugepage_shift) + if (is_thp) trace_hugepage_invalidate(start, pte); if (!(pte & _PAGE_HASHPTE)) continue; - if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte))) + if (unlikely(is_thp)) hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); else - hpte_need_flush(mm, start, ptep, pte, 0); + hpte_need_flush(mm, start, ptep, pte, hugepage_shift); } arch_leave_lazy_mmu_mode(); local_irq_restore(flags); diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index ff09cde20cd2..e04a6752b399 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -127,7 +127,7 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb) return -EFAULT; local_irq_save(flags); - ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift); + ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift); if (!ptep) goto err_out; if (!shift) -- cgit v1.2.3 From 4c9cd468b348c9e47f9380a5b22196c98188a50c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Oct 2015 16:44:31 +1000 Subject: powerpc/pseries: Make PCI non-optional The pseries build with PCI=n looks to have been broken for at least 5 years, and no one's noticed or cared. Following the obvious breakages backward, the first commit I can find that builds is the parent of 2eb4afb69ff3 ("powerpc/pci: Move pseries code into pseries platform specific area") from April 2009. A distro would never ship a PCI=n kernel, so it is only useful for folks building custom kernels. Also on KVM the virtio devices appear on PCI, so it would only be useful if you were building kernels specifically to run on PowerVM and with no PCI devices. The added code complexity, and testing load (which we've clearly not been doing), is not justified by the small reduction in kernel size for such a niche use case. 
So just make PCI non-optional on pseries. Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 54c87d5d349d..d9068a3d6af4 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -4,6 +4,7 @@ config PPC_PSERIES select HAVE_PCSPKR_PLATFORM select MPIC select OF_DYNAMIC + select PCI select PCI_MSI select PPC_XICS select PPC_ICP_NATIVE @@ -15,7 +16,6 @@ config PPC_PSERIES select RTAS_ERROR_LOGGING select PPC_UDBG_16550 select PPC_NATIVE - select PPC_PCI_CHOICE if EXPERT select PPC_DOORBELL select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP -- cgit v1.2.3 From 84eb9e612b022c8357980bbdba653d9626be2be8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Oct 2015 16:44:32 +1000 Subject: powerpc/pseries: Remove use of CONFIG_PCI Now that we always have CONFIG_PCI=y for pseries, we can stop guarding code with CONFIG_PCI ifdefs. Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/iommu.c | 10 ---------- arch/powerpc/platforms/pseries/setup.c | 4 ---- 2 files changed, 14 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 0946b98d75d4..bd98ce2be17b 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -532,7 +532,6 @@ static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); } -#ifdef CONFIG_PCI static void iommu_table_setparms(struct pci_controller *phb, struct device_node *dn, struct iommu_table *tbl) @@ -1292,15 +1291,6 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev) return dma_iommu_ops.get_required_mask(dev); } -#else /* CONFIG_PCI */ -#define pci_dma_bus_setup_pSeries NULL -#define pci_dma_dev_setup_pSeries NULL -#define pci_dma_bus_setup_pSeriesLP NULL -#define pci_dma_dev_setup_pSeriesLP NULL -#define dma_set_mask_pSeriesLP NULL -#define dma_get_required_mask_pSeriesLP NULL -#endif /* !CONFIG_PCI */ - static int iommu_mem_notifier(struct notifier_block *nb, unsigned lo