Diffstat (limited to 'arch/riscv/kernel')
 arch/riscv/kernel/Makefile              |   4
 arch/riscv/kernel/alternative.c         |  21
 arch/riscv/kernel/cacheinfo.c           |  66
 arch/riscv/kernel/compat_vdso/Makefile  |   2
 arch/riscv/kernel/cpu.c                 |  10
 arch/riscv/kernel/cpufeature.c          |  67
 arch/riscv/kernel/efi-header.S          |  19
 arch/riscv/kernel/entry.S               | 321
 arch/riscv/kernel/head.h                |   1
 arch/riscv/kernel/image-vars.h          |   2
 arch/riscv/kernel/mcount-dyn.S          |  57
 arch/riscv/kernel/pi/Makefile           |  39
 arch/riscv/kernel/pi/cmdline_early.c    |  62
 arch/riscv/kernel/process.c             |   5
 arch/riscv/kernel/ptrace.c              |  44
 arch/riscv/kernel/setup.c               |   2
 arch/riscv/kernel/signal.c              |  29
 arch/riscv/kernel/smpboot.c             |   1
 arch/riscv/kernel/sys_riscv.c           | 230
 arch/riscv/kernel/trace_irq.c           |  27
 arch/riscv/kernel/trace_irq.h           |  11
 arch/riscv/kernel/traps.c               | 144
 arch/riscv/kernel/vdso.c                |   6
 arch/riscv/kernel/vdso/Makefile         |   4
 arch/riscv/kernel/vdso/hwprobe.c        |  52
 arch/riscv/kernel/vdso/sys_hwprobe.S    |  15
 arch/riscv/kernel/vdso/vdso.lds.S       |   3
 arch/riscv/kernel/vmlinux.lds.S         |  35
 28 files changed, 715 insertions(+), 564 deletions(-)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 67f542be1bea..571f05958b41 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -68,8 +68,6 @@ obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
-obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
-
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
obj-$(CONFIG_RISCV_SBI) += sbi.o
@@ -90,3 +88,5 @@ obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_COMPAT) += compat_syscall_table.o
obj-$(CONFIG_COMPAT) += compat_signal.o
obj-$(CONFIG_COMPAT) += compat_vdso/
+
+obj-$(CONFIG_64BIT) += pi/
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index 2354c69dc7d1..6b75788c18e6 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -27,9 +27,11 @@ struct cpu_manufacturer_info_t {
void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
+ void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
+ unsigned long impid);
};
-static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
+static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
#ifdef CONFIG_RISCV_M_MODE
cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
@@ -41,6 +43,7 @@ static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_inf
cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif
+ cpu_mfr_info->feature_probe_func = NULL;
switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_SIFIVE
case SIFIVE_VENDOR_ID:
@@ -50,6 +53,7 @@ static void __init_or_module riscv_fill_cpu_mfr_info(struct cpu_manufacturer_inf
#ifdef CONFIG_ERRATA_THEAD
case THEAD_VENDOR_ID:
cpu_mfr_info->patch_func = thead_errata_patch_func;
+ cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
break;
#endif
default:
@@ -139,6 +143,20 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
}
}
+/* Called on each CPU as it starts */
+void probe_vendor_features(unsigned int cpu)
+{
+ struct cpu_manufacturer_info_t cpu_mfr_info;
+
+ riscv_fill_cpu_mfr_info(&cpu_mfr_info);
+ if (!cpu_mfr_info.feature_probe_func)
+ return;
+
+ cpu_mfr_info.feature_probe_func(cpu,
+ cpu_mfr_info.arch_id,
+ cpu_mfr_info.imp_id);
+}
+
/*
* This is called very early in the boot process (directly after we run
* a feature detect on the boot CPU). No need to worry about other CPUs
@@ -193,6 +211,7 @@ void __init apply_boot_alternatives(void)
/* If called on non-boot cpu things could go wrong */
WARN_ON(smp_processor_id() != 0);
+ probe_vendor_features(0);
_apply_alternatives((struct alt_entry *)__alt_start,
(struct alt_entry *)__alt_end,
RISCV_ALTERNATIVES_BOOT);
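
[Editor's sketch] A vendor hook wired up through the new feature_probe_func pointer would look roughly like the code below. This is illustrative only: the body, the RISCV_HWPROBE_MISALIGNED_FAST value and the use of the misaligned_access_speed per-CPU variable (introduced later in this series in cpufeature.c) are assumptions about how a vendor probe such as thead_feature_probe_func might be implemented, not a quote of the actual errata code.

	/* Hypothetical vendor probe, called once per CPU via probe_vendor_features() */
	void thead_feature_probe_func(unsigned int cpu,
				      unsigned long archid, unsigned long impid)
	{
		/* Record a per-CPU capability deduced from the vendor's arch/impl IDs. */
		if (archid == 0 && impid == 0)
			per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
	}
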
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index e3829d2de5d9..09e9b88110d1 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -63,53 +63,12 @@ uintptr_t get_cache_geometry(u32 level, enum cache_type type)
0;
}
-static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
- unsigned int level, unsigned int size,
- unsigned int sets, unsigned int line_size)
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ struct device_node *node,
+ enum cache_type type, unsigned int level)
{
this_leaf->level = level;
this_leaf->type = type;
- this_leaf->size = size;
- this_leaf->number_of_sets = sets;
- this_leaf->coherency_line_size = line_size;
-
- /*
- * If the cache is fully associative, there is no need to
- * check the other properties.
- */
- if (sets == 1)
- return;
-
- /*
- * Set the ways number for n-ways associative, make sure
- * all properties are big than zero.
- */
- if (sets > 0 && size > 0 && line_size > 0)
- this_leaf->ways_of_associativity = (size / sets) / line_size;
-}
-
-static void fill_cacheinfo(struct cacheinfo **this_leaf,
- struct device_node *node, unsigned int level)
-{
- unsigned int size, sets, line_size;
-
- if (!of_property_read_u32(node, "cache-size", &size) &&
- !of_property_read_u32(node, "cache-block-size", &line_size) &&
- !of_property_read_u32(node, "cache-sets", &sets)) {
- ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
- }
-
- if (!of_property_read_u32(node, "i-cache-size", &size) &&
- !of_property_read_u32(node, "i-cache-sets", &sets) &&
- !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
- ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
- }
-
- if (!of_property_read_u32(node, "d-cache-size", &size) &&
- !of_property_read_u32(node, "d-cache-sets", &sets) &&
- !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
- ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
- }
}
int populate_cache_leaves(unsigned int cpu)
@@ -120,24 +79,29 @@ int populate_cache_leaves(unsigned int cpu)
struct device_node *prev = NULL;
int levels = 1, level = 1;
- /* Level 1 caches in cpu node */
- fill_cacheinfo(&this_leaf, np, level);
+ if (of_property_read_bool(np, "cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ if (of_property_read_bool(np, "i-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ if (of_property_read_bool(np, "d-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
- /* Next level caches in cache nodes */
prev = np;
while ((np = of_find_next_cache_node(np))) {
of_node_put(prev);
prev = np;
-
if (!of_device_is_compatible(np, "cache"))
break;
if (of_property_read_u32(np, "cache-level", &level))
break;
if (level <= levels)
break;
-
- fill_cacheinfo(&this_leaf, np, level);
-
+ if (of_property_read_bool(np, "cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ if (of_property_read_bool(np, "i-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ if (of_property_read_bool(np, "d-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
levels = level;
}
of_node_put(np);
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index 7f34f3c7c882..189345773e7e 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -26,7 +26,7 @@ targets := $(obj-compat_vdso) compat_vdso.so compat_vdso.so.dbg compat_vdso.lds
obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
obj-y += compat_vdso.o
-CPPFLAGS_compat_vdso.lds += -P -C -U$(ARCH)
+CPPFLAGS_compat_vdso.lds += -P -C -DCOMPAT_VDSO -U$(ARCH)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 8400f0cc9704..3df38052dcbd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
@@ -70,12 +71,7 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
return -1;
}
-struct riscv_cpuinfo {
- unsigned long mvendorid;
- unsigned long marchid;
- unsigned long mimpid;
-};
-static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
{
@@ -186,11 +182,13 @@ arch_initcall(riscv_cpuinfo_init);
*/
static struct riscv_isa_ext_data isa_ext_arr[] = {
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
+ __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
+ __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
};
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 59d58ee0f68d..52585e088873 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,20 +8,15 @@
#include <linux/bitmap.h>
#include <linux/ctype.h>
-#include <linux/libfdt.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
-#include <asm/errata_list.h>
#include <asm/hwcap.h>
#include <asm/patch.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/smp.h>
-#include <asm/switch_to.h>
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
@@ -30,6 +25,9 @@ unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
+/* Performance information */
+DEFINE_PER_CPU(long, misaligned_access_speed);
+
/**
* riscv_isa_extension_base() - Get base extension word
*
@@ -79,6 +77,15 @@ static bool riscv_isa_extension_check(int id)
return false;
}
return true;
+ case RISCV_ISA_EXT_ZICBOZ:
+ if (!riscv_cboz_block_size) {
+ pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
+ return false;
+ } else if (!is_power_of_2(riscv_cboz_block_size)) {
+ pr_err("cboz-block-size present, but is not a power-of-2\n");
+ return false;
+ }
+ return true;
}
return true;
@@ -224,9 +231,11 @@ void __init riscv_fill_hwcap(void)
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL);
+ SET_ISA_EXT_MAP("svnapot", RISCV_ISA_EXT_SVNAPOT);
SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
SET_ISA_EXT_MAP("zbb", RISCV_ISA_EXT_ZBB);
SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
+ SET_ISA_EXT_MAP("zicboz", RISCV_ISA_EXT_ZICBOZ);
SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
}
#undef SET_ISA_EXT_MAP
@@ -269,12 +278,46 @@ void __init riscv_fill_hwcap(void)
}
#ifdef CONFIG_RISCV_ALTERNATIVE
+/*
+ * Alternative patch sites consider 48 bits when determining when to patch
+ * the old instruction sequence with the new. These bits are broken into a
+ * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
+ * patch site is for an erratum, identified by the 32-bit patch ID. When
+ * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
+ * further break down patch ID into two 16-bit numbers. The lower 16 bits
+ * are the cpufeature ID and the upper 16 bits are used for a value specific
+ * to the cpufeature and patch site. If the upper 16 bits are zero, then it
+ * implies no specific value is specified. cpufeatures that want to control
+ * patching on a per-site basis will provide non-zero values and implement
+ * checks here. The checks return true when patching should be done, and
+ * false otherwise.
+ */
+static bool riscv_cpufeature_patch_check(u16 id, u16 value)
+{
+ if (!value)
+ return true;
+
+ switch (id) {
+ case RISCV_ISA_EXT_ZICBOZ:
+ /*
+ * Zicboz alternative applications provide the maximum
+ * supported block size order, or zero when it doesn't
+ * matter. If the current block size exceeds the maximum,
+ * then the alternative cannot be applied.
+ */
+ return riscv_cboz_block_size <= (1U << value);
+ }
+
+ return false;
+}
+
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
struct alt_entry *end,
unsigned int stage)
{
struct alt_entry *alt;
void *oldptr, *altptr;
+ u16 id, value;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;
@@ -282,13 +325,19 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != 0)
continue;
- if (alt->errata_id >= RISCV_ISA_EXT_MAX) {
- WARN(1, "This extension id:%d is not in ISA extension list",
- alt->errata_id);
+
+ id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);
+
+ if (id >= RISCV_ISA_EXT_MAX) {
+ WARN(1, "This extension id:%d is not in ISA extension list", id);
continue;
}
- if (!__riscv_isa_extension_available(NULL, alt->errata_id))
+ if (!__riscv_isa_extension_available(NULL, id))
+ continue;
+
+ value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
+ if (!riscv_cpufeature_patch_check(id, value))
continue;
oldptr = ALT_OLD_PTR(alt);
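
[Editor's sketch] The patch-ID split described in the comment above (16-bit cpufeature ID in the low half, 16-bit per-site value in the high half) can be pictured with the helpers below; the macro bodies are an assumption based on that comment, not copied from <asm/alternative.h>.

	/* Assumed helpers matching the 16/16 split described above */
	#define PATCH_ID_CPUFEATURE_ID(p)	lower_16_bits(p)
	#define PATCH_ID_CPUFEATURE_VALUE(p)	upper_16_bits(p)

	/*
	 * Example: a Zicboz patch site that may only be applied when the CPU's
	 * cboz block size is at most 2^6 = 64 bytes would encode
	 * patch_id = (6 << 16) | RISCV_ISA_EXT_ZICBOZ, so that
	 * riscv_cpufeature_patch_check() can reject larger block sizes.
	 */
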
diff --git a/arch/riscv/kernel/efi-header.S b/arch/riscv/kernel/efi-header.S
index 8e733aa48ba6..515b2dfbca75 100644
--- a/arch/riscv/kernel/efi-header.S
+++ b/arch/riscv/kernel/efi-header.S
@@ -6,6 +6,7 @@
#include <linux/pe.h>
#include <linux/sizes.h>
+#include <asm/set_memory.h>
.macro __EFI_PE_HEADER
.long PE_MAGIC
@@ -33,7 +34,11 @@ optional_header:
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
.long __pecoff_text_end - efi_header_end // SizeOfCode
- .long __pecoff_data_virt_size // SizeOfInitializedData
+#ifdef __clang__
+ .long __pecoff_data_virt_size // SizeOfInitializedData
+#else
+ .long __pecoff_data_virt_end - __pecoff_text_end // SizeOfInitializedData
+#endif
.long 0 // SizeOfUninitializedData
.long __efistub_efi_pe_entry - _start // AddressOfEntryPoint
.long efi_header_end - _start // BaseOfCode
@@ -91,9 +96,17 @@ section_table:
IMAGE_SCN_MEM_EXECUTE // Characteristics
.ascii ".data\0\0\0"
- .long __pecoff_data_virt_size // VirtualSize
+#ifdef __clang__
+ .long __pecoff_data_virt_size // VirtualSize
+#else
+ .long __pecoff_data_virt_end - __pecoff_text_end // VirtualSize
+#endif
.long __pecoff_text_end - _start // VirtualAddress
- .long __pecoff_data_raw_size // SizeOfRawData
+#ifdef __clang__
+ .long __pecoff_data_raw_size // SizeOfRawData
+#else
+ .long __pecoff_data_raw_end - __pecoff_text_end // SizeOfRawData
+#endif
.long __pecoff_text_end - _start // PointerToRawData
.long 0 // PointerToRelocations
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 99d38fdf8b18..3fbb100bc9e4 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -14,11 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
-#if !IS_ENABLED(CONFIG_PREEMPTION)
-.set resume_kernel, restore_all
-#endif
-
-ENTRY(handle_exception)
+SYM_CODE_START(handle_exception)
/*
* If coming from userspace, preserve the user thread pointer and load
* the kernel thread pointer. If we came from the kernel, the scratch
@@ -46,32 +42,7 @@ _save_context:
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
- REG_S x6, PT_T1(sp)
- REG_S x7, PT_T2(sp)
- REG_S x8, PT_S0(sp)
- REG_S x9, PT_S1(sp)
- REG_S x10, PT_A0(sp)
- REG_S x11, PT_A1(sp)
- REG_S x12, PT_A2(sp)
- REG_S x13, PT_A3(sp)
- REG_S x14, PT_A4(sp)
- REG_S x15, PT_A5(sp)
- REG_S x16, PT_A6(sp)
- REG_S x17, PT_A7(sp)
- REG_S x18, PT_S2(sp)
- REG_S x19, PT_S3(sp)
- REG_S x20, PT_S4(sp)
- REG_S x21, PT_S5(sp)
- REG_S x22, PT_S6(sp)
- REG_S x23, PT_S7(sp)
- REG_S x24, PT_S8(sp)
- REG_S x25, PT_S9(sp)
- REG_S x26, PT_S10(sp)
- REG_S x27, PT_S11(sp)
- REG_S x28, PT_T3(sp)
- REG_S x29, PT_T4(sp)
- REG_S x30, PT_T5(sp)
- REG_S x31, PT_T6(sp)
+ save_from_x6_to_x31
/*
* Disable user-mode memory access as it should only be set in the
@@ -106,19 +77,8 @@ _save_context:
.option norelax
la gp, __global_pointer$
.option pop
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- call __trace_hardirqs_off
-#endif
-
-#ifdef CONFIG_CONTEXT_TRACKING_USER
- /* If previous state is in user mode, call user_exit_callable(). */
- li a0, SR_PP
- and a0, s1, a0
- bnez a0, skip_context_tracking
- call user_exit_callable
-skip_context_tracking:
-#endif
+ move a0, sp /* pt_regs */
+ la ra, ret_from_exception
/*
* MSB of cause differentiates between
@@ -126,38 +86,13 @@ skip_context_tracking:
*/
bge s4, zero, 1f
- la ra, ret_from_exception
-
/* Handle interrupts */
- move a0, sp /* pt_regs */
- la a1, generic_handle_arch_irq
- jr a1
+ tail do_irq
1:
- /*
- * Exceptions run with interrupts enabled or disabled depending on the
- * state of SR_PIE in m/sstatus.
- */
- andi t0, s1, SR_PIE
- beqz t0, 1f
- /* kprobes, entered via ebreak, must have interrupts disabled. */
- li t0, EXC_BREAKPOINT
- beq s4, t0, 1f
-#ifdef CONFIG_TRACE_IRQFLAGS
- call __trace_hardirqs_on
-#endif
- csrs CSR_STATUS, SR_IE
-
-1:
- la ra, ret_from_exception
- /* Handle syscalls */
- li t0, EXC_SYSCALL
- beq s4, t0, handle_syscall
-
/* Handle other exceptions */
slli t0, s4, RISCV_LGPTR
la t1, excp_vect_table
la t2, excp_vect_table_end
- move a0, sp /* pt_regs */
add t0, t1, t0
/* Check if exception code lies within bounds */
bgeu t0, t2, 1f
@@ -165,95 +100,16 @@ skip_context_tracking:
jr t0
1:
tail do_trap_unknown
+SYM_CODE_END(handle_exception)
-handle_syscall:
-#ifdef CONFIG_RISCV_M_MODE
- /*
- * When running is M-Mode (no MMU config), MPIE does not get set.
- * As a result, we need to force enable interrupts here because
- * handle_exception did not do set SR_IE as it always sees SR_PIE
- * being cleared.
- */
- csrs CSR_STATUS, SR_IE
-#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
- /* Recover a0 - a7 for system calls */
- REG_L a0, PT_A0(sp)
- REG_L a1, PT_A1(sp)
- REG_L a2, PT_A2(sp)
- REG_L a3, PT_A3(sp)
- REG_L a4, PT_A4(sp)
- REG_L a5, PT_A5(sp)
- REG_L a6, PT_A6(sp)
- REG_L a7, PT_A7(sp)
-#endif
- /* save the initial A0 value (needed in signal handlers) */
- REG_S a0, PT_ORIG_A0(sp)
- /*
- * Advance SEPC to avoid executing the original
- * scall instruction on sret
- */
- addi s2, s2, 0x4
- REG_S s2, PT_EPC(sp)
- /* Trace syscalls, but only if requested by the user. */
- REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_WORK
- bnez t0, handle_syscall_trace_enter
-check_syscall_nr:
- /* Check to make sure we don't jump to a bogus syscall number. */
- li t0, __NR_syscalls
- la s0, sys_ni_syscall
- /*
- * Syscall number held in a7.
- * If syscall number is above allowed value, redirect to ni_syscall.
- */
- bgeu a7, t0, 3f
-#ifdef CONFIG_COMPAT
- REG_L s0, PT_STATUS(sp)
- srli s0, s0, SR_UXL_SHIFT
- andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
- li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
- sub t0, s0, t0
- bnez t0, 1f
-
- /* Call compat_syscall */
- la s0, compat_sys_call_table
- j 2f
-1:
-#endif
- /* Call syscall */
- la s0, sys_call_table
-2:
- slli t0, a7, RISCV_LGPTR
- add s0, s0, t0
- REG_L s0, 0(s0)
-3:
- jalr s0
-
-ret_from_syscall:
- /* Set user a0 to kernel a0 */
- REG_S a0, PT_A0(sp)
- /*
- * We didn't execute the actual syscall.
- * Seccomp already set return value for the current task pt_regs.
- * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
- */
-ret_from_syscall_rejected:
-#ifdef CONFIG_DEBUG_RSEQ
- move a0, sp
- call rseq_syscall
-#endif
- /* Trace syscalls, but only if requested by the user. */
- REG_L t0, TASK_TI_FLAGS(tp)
- andi t0, t0, _TIF_SYSCALL_WORK
- bnez t0, handle_syscall_trace_exit
-
+/*
+ * The ret_from_exception must be called with interrupt disabled. Here is the
+ * caller list:
+ * - handle_exception
+ * - ret_from_fork
+ */
SYM_CODE_START_NOALIGN(ret_from_exception)
REG_L s0, PT_STATUS(sp)
- csrc CSR_STATUS, SR_IE
-#ifdef CONFIG_TRACE_IRQFLAGS
- call __trace_hardirqs_off
-#endif
#ifdef CONFIG_RISCV_M_MODE
/* the MPP value is too large to be used as an immediate arg for addi */
li t0, SR_MPP
@@ -261,17 +117,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
#else
andi s0, s0, SR_SPP
#endif
- bnez s0, resume_kernel
-SYM_CODE_END(ret_from_exception)
-
- /* Interrupts must be disabled here so flags are checked atomically */
- REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
- andi s1, s0, _TIF_WORK_MASK
- bnez s1, resume_userspace_slow
-resume_userspace:
-#ifdef CONFIG_CONTEXT_TRACKING_USER
- call user_enter_callable
-#endif
+ bnez s0, 1f
/* Save unwound kernel stack pointer in thread_info */
addi s0, sp, PT_SIZE_ON_STACK
@@ -282,18 +128,7 @@ resume_userspace:
* structures again.
*/
csrw CSR_SCRATCH, tp
-
-restore_all:
-#ifdef CONFIG_TRACE_IRQFLAGS
- REG_L s1, PT_STATUS(sp)
- andi t0, s1, SR_PIE
- beqz t0, 1f
- call __trace_hardirqs_on
- j 2f
1:
- call __trace_hardirqs_off
-2:
-#endif
REG_L a0, PT_STATUS(sp)
/*
* The current load reservation is effectively part of the processor's
@@ -322,32 +157,7 @@ restore_all:
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
REG_L x5, PT_T0(sp)
- REG_L x6, PT_T1(sp)
- REG_L x7, PT_T2(sp)
- REG_L x8, PT_S0(sp)
- REG_L x9, PT_S1(sp)
- REG_L x10, PT_A0(sp)
- REG_L x11, PT_A1(sp)
- REG_L x12, PT_A2(sp)
- REG_L x13, PT_A3(sp)
- REG_L x14, PT_A4(sp)
- REG_L x15, PT_A5(sp)
- REG_L x16, PT_A6(sp)
- REG_L x17, PT_A7(sp)
- REG_L x18, PT_S2(sp)
- REG_L x19, PT_S3(sp)
- REG_L x20, PT_S4(sp)
- REG_L x21, PT_S5(sp)
- REG_L x22, PT_S6(sp)
- REG_L x23, PT_S7(sp)
- REG_L x24, PT_S8(sp)
- REG_L x25, PT_S9(sp)
- REG_L x26, PT_S10(sp)
- REG_L x27, PT_S11(sp)
- REG_L x28, PT_T3(sp)
- REG_L x29, PT_T4(sp)
- REG_L x30, PT_T5(sp)
- REG_L x31, PT_T6(sp)
+ restore_from_x6_to_x31
REG_L x2, PT_SP(sp)
@@ -356,47 +166,10 @@ restore_all:
#else
sret
#endif
-
-#if IS_ENABLED(CONFIG_PREEMPTION)
-resume_kernel:
- REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
- bnez s0, restore_all
- REG_L s0, TASK_TI_FLAGS(tp)
- andi s0, s0, _TIF_NEED_RESCHED
- beqz s0, restore_all
- call preempt_schedule_irq
- j restore_all
-#endif
-
-resume_userspace_slow:
- /* Enter slow path for supplementary processing */
- move a0, sp /* pt_regs */
- move a1, s0 /* current_thread_info->flags */
- call do_work_pending
- j resume_userspace
-
-/* Slow paths for ptrace. */
-handle_syscall_trace_enter:
- move a0, sp
- call do_syscall_trace_enter
- move t0, a0
- REG_L a0, PT_A0(sp)
- REG_L a1, PT_A1(sp)
- REG_L a2, PT_A2(sp)
- REG_L a3, PT_A3(sp)
- REG_L a4, PT_A4(sp)
- REG_L a5, PT_A5(sp)
- REG_L a6, PT_A6(sp)
- REG_L a7, PT_A7(sp)
- bnez t0, ret_from_syscall_rejected
- j check_syscall_nr
-handle_syscall_trace_exit:
- move a0, sp
- call do_syscall_trace_exit
- j ret_from_exception
+SYM_CODE_END(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
-handle_kernel_stack_overflow:
+SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
/*
* Takes the psuedo-spinlock for the shadow stack, in case multiple
* harts are concurrently overflowing their kernel stacks. We could
@@ -464,32 +237,7 @@ restore_caller_reg:
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
- REG_S x6, PT_T1(sp)
- REG_S x7, PT_T2(sp)
- REG_S x8, PT_S0(sp)
- REG_S x9, PT_S1(sp)
- REG_S x10, PT_A0(sp)
- REG_S x11, PT_A1(sp)
- REG_S x12, PT_A2(sp)
- REG_S x13, PT_A3(sp)
- REG_S x14, PT_A4(sp)
- REG_S x15, PT_A5(sp)
- REG_S x16, PT_A6(sp)
- REG_S x17, PT_A7(sp)
- REG_S x18, PT_S2(sp)
- REG_S x19, PT_S3(sp)
- REG_S x20, PT_S4(sp)
- REG_S x21, PT_S5(sp)
- REG_S x22, PT_S6(sp)
- REG_S x23, PT_S7(sp)
- REG_S x24, PT_S8(sp)
- REG_S x25, PT_S9(sp)
- REG_S x26, PT_S10(sp)
- REG_S x27, PT_S11(sp)
- REG_S x28, PT_T3(sp)
- REG_S x29, PT_T4(sp)
- REG_S x30, PT_T5(sp)
- REG_S x31, PT_T6(sp)
+ save_from_x6_to_x31
REG_L s0, TASK_TI_KERNEL_SP(tp)
csrr s1, CSR_STATUS
@@ -505,23 +253,20 @@ restore_caller_reg:
REG_S s5, PT_TP(sp)
move a0, sp
tail handle_bad_stack
+SYM_CODE_END(handle_kernel_stack_overflow)
#endif
-END(handle_exception)
-
-ENTRY(ret_from_fork)
- la ra, ret_from_exception
- tail schedule_tail
-ENDPROC(ret_from_fork)
-
-ENTRY(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_fork)
call schedule_tail
+ beqz s0, 1f /* not from kernel thread */
/* Call fn(arg) */
- la ra, ret_from_exception
move a0, s1
- jr s0
-ENDPROC(ret_from_kernel_thread)
-
+ jalr s0
+1:
+ move a0, sp /* pt_regs */
+ la ra, ret_from_exception
+ tail syscall_exit_to_user_mode
+SYM_CODE_END(ret_from_fork)
/*
* Integer register context switch
@@ -533,7 +278,7 @@ ENDPROC(ret_from_kernel_thread)
* The value of a0 and a1 must be preserved by this function, as that's how
* arguments are passed to schedule_tail.
*/
-ENTRY(__switch_to)
+SYM_FUNC_START(__switch_to)
/* Save context into prev->thread */
li a4, TASK_THREAD_RA
add a3, a0, a4
@@ -570,7 +315,7 @@ ENTRY(__switch_to)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
ret
-ENDPROC(__switch_to)
+SYM_FUNC_END(__switch_to)
#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
@@ -579,7 +324,7 @@ ENDPROC(__switch_to)
.section ".rodata"
.align LGREG
/* Exception vector table */
-ENTRY(excp_vect_table)
+SYM_CODE_START(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
@@ -588,7 +333,7 @@ ENTRY(excp_vect_table)
RISCV_PTR do_trap_load_fault
RISCV_PTR do_trap_store_misaligned
RISCV_PTR do_trap_store_fault
- RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+ RISCV_PTR do_trap_ecall_u /* system call */
RISCV_PTR do_trap_ecall_s
RISCV_PTR do_trap_unknown
RISCV_PTR do_trap_ecall_m
@@ -598,11 +343,11 @@ ENTRY(excp_vect_table)
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
-END(excp_vect_table)
+SYM_CODE_END(excp_vect_table)
#ifndef CONFIG_MMU
-ENTRY(__user_rt_sigreturn)
+SYM_CODE_START(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
scall
-END(__user_rt_sigreturn)
+SYM_CODE_END(__user_rt_sigreturn)
#endif
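
[Editor's sketch] With the assembly syscall path deleted, do_trap_ecall_u (now reached directly from the exception vector table above) presumably carries the equivalent logic in C via the generic entry helpers. A minimal sketch under that assumption follows; the real body lives in traps.c later in this diff and may differ, and syscall_handler() is a hypothetical helper that indexes sys_call_table.

	asmlinkage void do_trap_ecall_u(struct pt_regs *regs)
	{
		if (user_mode(regs)) {
			unsigned long syscall = regs->a7;

			regs->epc += 4;			/* skip the ecall on return */
			regs->orig_a0 = regs->a0;	/* keep a0 for signal restart */

			syscall = syscall_enter_from_user_mode(regs, syscall);

			if (syscall < __NR_syscalls)
				syscall_handler(regs, syscall);
			else
				regs->a0 = -ENOSYS;

			syscall_exit_to_user_mode(regs);
		}
	}
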
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
index 726731ada534..a556fdaafed9 100644
--- a/arch/riscv/kernel/head.h
+++ b/arch/riscv/kernel/head.h
@@ -10,7 +10,6 @@
extern atomic_t hart_lottery;
-asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void __init setup_vm(uintptr_t dtb_pa);
#ifdef CONFIG_XIP_KERNEL
asmlinkage void __init __copy_data(void);
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
index 7e2962ef73f9..15616155008c 100644
--- a/arch/riscv/kernel/image-vars.h
+++ b/arch/riscv/kernel/image-vars.h
@@ -23,8 +23,6 @@
* linked at. The routines be