Diffstat (limited to 'arch')
 -rw-r--r--  arch/riscv/Kconfig                    |  2
 -rw-r--r--  arch/riscv/include/asm/arch_hweight.h | 24
 -rw-r--r--  arch/riscv/include/asm/bitops.h       | 32
 -rw-r--r--  arch/riscv/include/asm/checksum.h     | 13
 -rw-r--r--  arch/riscv/include/asm/cmpxchg.h      | 12
 -rw-r--r--  arch/riscv/include/asm/hwcap.h        |  1
 -rw-r--r--  arch/riscv/include/asm/hwprobe.h      |  2
 -rw-r--r--  arch/riscv/include/asm/insn-def.h     | 79
 -rw-r--r--  arch/riscv/include/asm/pgtable.h      | 15
 -rw-r--r--  arch/riscv/include/asm/vector.h       |  1
 -rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h |  3
 -rw-r--r--  arch/riscv/kernel/cpufeature.c        |  1
 -rw-r--r--  arch/riscv/kernel/ptrace.c            | 24
 -rw-r--r--  arch/riscv/kernel/smpboot.c           | 15
 -rw-r--r--  arch/riscv/kernel/sys_hwprobe.c       |  7
 -rw-r--r--  arch/riscv/kernel/vector.c            |  2
 -rw-r--r--  arch/riscv/lib/csum.c                 | 53
 -rw-r--r--  arch/riscv/mm/init.c                  |  4
 -rw-r--r--  arch/riscv/mm/pgtable.c               | 22
 19 files changed, 187 insertions(+), 125 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c95833e80423..6b39f37f769a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -200,7 +200,7 @@ config RISCV
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
- select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select HOTPLUG_PARALLEL if HOTPLUG_CPU
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select KASAN_VMALLOC if KASAN
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
index 0e7cdbbec8ef..f3c0831beefc 100644
--- a/arch/riscv/include/asm/arch_hweight.h
+++ b/arch/riscv/include/asm/arch_hweight.h
@@ -19,10 +19,10 @@
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
-#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!(IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)))
+ return __sw_hweight32(w);
asm (".option push\n"
".option arch,+zbb\n"
@@ -31,10 +31,6 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
: "=r" (w) : "r" (w) :);
return w;
-
-legacy:
-#endif
- return __sw_hweight32(w);
}
static inline unsigned int __arch_hweight16(unsigned int w)
@@ -50,10 +46,10 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#if BITS_PER_LONG == 64
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
-#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!(IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)))
+ return __sw_hweight64(w);
asm (".option push\n"
".option arch,+zbb\n"
@@ -62,10 +58,6 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
: "=r" (w) : "r" (w) :);
return w;
-
-legacy:
-#endif
- return __sw_hweight64(w);
}
#else /* BITS_PER_LONG == 64 */
static inline unsigned long __arch_hweight64(__u64 w)
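
The conversion above repeats throughout this series: the open-coded "asm goto"
trampoline becomes a call to riscv_has_extension_likely(), which wraps the same
ALTERNATIVE-patched branch behind an ordinary C predicate. A minimal sketch of
the before/after shape (illustrative only, not the kernel's exact code):

	/* Before: ALTERNATIVE-patched jump to a local label. */
	static unsigned int hweight32_old(unsigned int w)
	{
		asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
				     RISCV_ISA_EXT_ZBB, 1)
			 : : : : legacy);
		/* ... Zbb cpop path ... */
		return w;
	legacy:
		return __sw_hweight32(w);
	}

	/* After: same runtime patching, no label plumbing. */
	static unsigned int hweight32_new(unsigned int w)
	{
		if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
			return __sw_hweight32(w);
		/* ... Zbb cpop path ... */
		return w;
	}
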
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 77880677b06e..238092125c11 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -47,9 +47,8 @@
static __always_inline __attribute_const__ unsigned long variable__ffs(unsigned long word)
{
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
+ return generic___ffs(word);
asm volatile (".option push\n"
".option arch,+zbb\n"
@@ -58,9 +57,6 @@ static __always_inline __attribute_const__ unsigned long variable__ffs(unsigned
: "=r" (word) : "r" (word) :);
return word;
-
-legacy:
- return generic___ffs(word);
}
/**
@@ -76,9 +72,8 @@ legacy:
static __always_inline __attribute_const__ unsigned long variable__fls(unsigned long word)
{
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
+ return generic___fls(word);
asm volatile (".option push\n"
".option arch,+zbb\n"
@@ -87,9 +82,6 @@ static __always_inline __attribute_const__ unsigned long variable__fls(unsigned
: "=r" (word) : "r" (word) :);
return BITS_PER_LONG - 1 - word;
-
-legacy:
- return generic___fls(word);
}
/**
@@ -105,9 +97,8 @@ legacy:
static __always_inline __attribute_const__ int variable_ffs(int x)
{
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
+ return generic_ffs(x);
if (!x)
return 0;
@@ -119,9 +110,6 @@ static __always_inline __attribute_const__ int variable_ffs(int x)
: "=r" (x) : "r" (x) :);
return x + 1;
-
-legacy:
- return generic_ffs(x);
}
/**
@@ -137,9 +125,8 @@ legacy:
static __always_inline int variable_fls(unsigned int x)
{
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- : : : : legacy);
+ if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
+ return generic_fls(x);
if (!x)
return 0;
@@ -151,9 +138,6 @@ static __always_inline int variable_fls(unsigned int x)
: "=r" (x) : "r" (x) :);
return 32 - x;
-
-legacy:
- return generic_fls(x);
}
/**
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index da378856f1d5..945cce34be92 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -49,16 +49,11 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
* ZBB only saves three instructions on 32-bit and five on 64-bit so not
* worth checking if supported without Alternatives.
*/
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) {
unsigned long fold_temp;
- asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- :
- :
- :
- : no_zbb);
-
if (IS_ENABLED(CONFIG_32BIT)) {
asm(".option push \n\
.option arch,+zbb \n\
@@ -81,7 +76,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
}
return (__force __sum16)(csum >> 16);
}
-no_zbb:
+
#ifndef CONFIG_32BIT
csum += ror64(csum, 32);
csum >>= 32;
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 122e1485d39a..8712cf9c69dc 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -373,9 +373,10 @@ static __always_inline void __cmpwait(volatile void *ptr,
u32 *__ptr32b;
ulong __s, __val, __mask;
- asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
- 0, RISCV_ISA_EXT_ZAWRS, 1)
- : : : : no_zawrs);
+ if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZAWRS)) {
+ ALT_RISCV_PAUSE();
+ return;
+ }
switch (size) {
case 1:
@@ -437,11 +438,6 @@ static __always_inline void __cmpwait(volatile void *ptr,
default:
BUILD_BUG();
}
-
- return;
-
-no_zawrs:
- ALT_RISCV_PAUSE();
}
#define __cmpwait_relaxed(ptr, val) \
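
__cmpwait() now checks Zawrs up front and falls back to ALT_RISCV_PAUSE()
before reaching the size switch. For context, a sketch of how the relaxed
wrapper is typically consumed, following the generic smp_cond_load_relaxed()
pattern (hypothetical helper, not part of this patch):

	/* Spin until *ptr leaves 'old'; __cmpwait_relaxed() parks the
	 * hart with wrs.nto when Zawrs is present, pause otherwise. */
	static inline u32 wait_until_changed(u32 *ptr, u32 old)
	{
		u32 val;

		while ((val = READ_ONCE(*ptr)) == old)
			__cmpwait_relaxed(ptr, old);

		return val;
	}
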
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index f98fcb5c17d5..dfe57b215e6c 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -107,6 +107,7 @@
#define RISCV_ISA_EXT_ZALRSC 98
#define RISCV_ISA_EXT_ZICBOP 99
#define RISCV_ISA_EXT_SVRSW60T59B 100
+#define RISCV_ISA_EXT_ZALASR 101
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 58f8dda73259..8c572a464719 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 14
+#define RISCV_HWPROBE_MAX_KEY 15
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index d29da6ccd3dd..7c6daf116756 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -179,6 +179,7 @@
#define RV___RS1(v) __RV_REG(v)
#define RV___RS2(v) __RV_REG(v)
+#define RV_OPCODE_AMO RV_OPCODE(47)
#define RV_OPCODE_MISC_MEM RV_OPCODE(15)
#define RV_OPCODE_OP_IMM RV_OPCODE(19)
#define RV_OPCODE_SYSTEM RV_OPCODE(115)
@@ -208,6 +209,84 @@
__ASM_STR(.error "hlv.d requires 64-bit support")
#endif
+#define LB_AQ(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(0), FUNC7(26), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LB_AQRL(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(0), FUNC7(27), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LH_AQ(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(1), FUNC7(26), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LH_AQRL(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(1), FUNC7(27), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LW_AQ(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(2), FUNC7(26), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LW_AQRL(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(2), FUNC7(27), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define SB_RL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(0), FUNC7(29), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SB_AQRL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(0), FUNC7(31), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SH_RL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(1), FUNC7(29), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SH_AQRL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(1), FUNC7(31), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SW_RL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(2), FUNC7(29), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SW_AQRL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(2), FUNC7(31), \
+ __RD(0), RS1(addr), RS2(src))
+
+#ifdef CONFIG_64BIT
+#define LD_AQ(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(3), FUNC7(26), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define LD_AQRL(dest, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(3), FUNC7(27), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#define SD_RL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(3), FUNC7(29), \
+ __RD(0), RS1(addr), RS2(src))
+
+#define SD_AQRL(src, addr) \
+ INSN_R(OPCODE_AMO, FUNC3(3), FUNC7(31), \
+ __RD(0), RS1(addr), RS2(src))
+#else
+#define LD_AQ(dest, addr) \
+ __ASM_STR(.error "ld.aq requires 64-bit support")
+
+#define LD_AQRL(dest, addr) \
+ __ASM_STR(.error "ld.aqrl requires 64-bit support")
+
+#define SD_RL(dest, addr) \
+ __ASM_STR(.error "sd.rl requires 64-bit support")
+
+#define SD_AQRL(dest, addr) \
+ __ASM_STR(.error "sd.aqrl requires 64-bit support")
+#endif
+
#define SINVAL_VMA(vaddr, asid) \
INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \
__RD(0), RS1(vaddr), RS2(asid))
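
The new macros emit Zalasr encodings through .insn, so no assembler support
for the extension is required: FUNC7 packs the five-bit funct5 with the aq/rl
bits, giving 26 (aq) and 27 (aqrl) for the loads, and 29 (rl) and 31 (aqrl)
for the stores. A hypothetical consumer in the style of the existing HLV_*
users ("lw_aq" is an illustrative name, not an interface added here):

	static inline u32 lw_aq(const u32 *addr)
	{
		u32 val;

		asm volatile(LW_AQ(%[val], %[addr])
			     : [val] "=r" (val)
			     : [addr] "r" (addr)
			     : "memory");
		return val;
	}
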
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 1c311193e7da..8bd36ac842eb 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -567,8 +567,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
- asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
- : : : : svvptc);
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
+ return;
/*
* The kernel assumes that TLBs don't cache invalid entries, but
@@ -580,12 +585,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
-svvptc:;
- /*
- * Svvptc guarantees that the new valid pte will be visible within
- * a bounded timeframe, so when the uarch does not cache invalid
- * entries, we don't have to do anything.
- */
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index b61786d43c20..e7aa449368ad 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -51,6 +51,7 @@ void put_cpu_vector_context(void);
void riscv_v_thread_free(struct task_struct *tsk);
void __init riscv_v_setup_ctx_cache(void);
void riscv_v_thread_alloc(struct task_struct *tsk);
+void __init update_regset_vector_info(unsigned long size);
static inline u32 riscv_v_flags(void)
{
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 5d30a4fae37a..1edea2331b8b 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -82,6 +82,8 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56)
#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57)
#define RISCV_HWPROBE_EXT_ZABHA (1ULL << 58)
+#define RISCV_HWPROBE_EXT_ZALASR (1ULL << 59)
+#define RISCV_HWPROBE_EXT_ZICBOP (1ULL << 60)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -107,6 +109,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12
#define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13
#define RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0 14
+#define RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE 15
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
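
With the two new entries, userspace can discover Zalasr and the Zicbop block
size through the existing riscv_hwprobe(2) interface. A minimal sketch,
assuming headers new enough to define __NR_riscv_hwprobe (error handling
trimmed):

	#include <asm/hwprobe.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct riscv_hwprobe pairs[] = {
			{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
			{ .key = RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE },
		};

		/* No cpuset, no flags: behavior common to all online CPUs. */
		if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
			return 1;

		printf("Zalasr:            %s\n",
		       pairs[0].value & RISCV_HWPROBE_EXT_ZALASR ? "yes" : "no");
		printf("Zicbop block size: %llu\n",
		       (unsigned long long)pairs[1].value);
		return 0;
	}
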
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 5441282656a7..b057362f8fb5 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -472,6 +472,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zaamo, RISCV_ISA_EXT_ZAAMO),
__RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA),
__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+ __RISCV_ISA_EXT_DATA(zalasr, RISCV_ISA_EXT_ZALASR),
__RISCV_ISA_EXT_DATA(zalrsc, RISCV_ISA_EXT_ZALRSC),
__RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS),
__RISCV_ISA_EXT_DATA_VALIDATE(zfa, RISCV_ISA_EXT_ZFA, riscv_ext_f_depends),
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 8e86305831ea..e6272d74572f 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -153,6 +153,17 @@ static int riscv_vr_set(struct task_struct *target,
0, riscv_v_vsize);
return ret;
}
+
+static int riscv_vr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!(has_vector() || has_xtheadvector()))
+ return -ENODEV;
+
+ if (!riscv_v_vstate_query(task_pt_regs(target)))
+ return 0;
+
+ return regset->n;
+}
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
@@ -184,7 +195,7 @@ static int tagged_addr_ctrl_set(struct task_struct *target,
}
#endif
-static const struct user_regset riscv_user_regset[] = {
+static struct user_regset riscv_user_regset[] __ro_after_init = {
[REGSET_X] = {
USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
@@ -207,11 +218,10 @@ static const struct user_regset riscv_user_regset[] = {
[REGSET_V] = {
USER_REGSET_NOTE_TYPE(RISCV_VECTOR),
.align = 16,
- .n = ((32 * RISCV_MAX_VLENB) +
- sizeof(struct __riscv_v_regset_state)) / sizeof(__u32),
.size = sizeof(__u32),
.regset_get = riscv_vr_get,
.set = riscv_vr_set,
+ .active = riscv_vr_active,
},
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
@@ -233,6 +243,14 @@ static const struct user_regset_view riscv_user_native_view = {
.n = ARRAY_SIZE(riscv_user_regset),
};
+#ifdef CONFIG_RISCV_ISA_V
+void __init update_regset_vector_info(unsigned long size)
+{
+ riscv_user_regset[REGSET_V].n = (size + sizeof(struct __riscv_v_regset_state)) /
+ sizeof(__u32);
+}
+#endif
+
struct pt_regs_offset {
const char *name;
int offset;
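
The vector regset previously advertised a worst case derived from
RISCV_MAX_VLENB; its .n is now filled in at boot from the probed riscv_v_vsize
(see the vector.c hunk below). Illustrative arithmetic, assuming VLEN = 128
(vlenb = 16 bytes):

	/* payload = 32 registers * 16 bytes = 512 bytes, so
	 * .n = (512 + sizeof(struct __riscv_v_regset_state)) / sizeof(__u32)
	 * 32-bit members -- far below the RISCV_MAX_VLENB worst case. */
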
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 601a321e0f17..d85916a3660c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,9 @@
#include "head.h"
+#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_running);
+#endif
void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -179,6 +181,12 @@ static int start_secondary_cpu(int cpu, struct task_struct *tidle)
return -EOPNOTSUPP;
}
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+ return start_secondary_cpu(cpu, tidle);
+}
+#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = 0;
@@ -199,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return ret;
}
+#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
@@ -225,6 +234,10 @@ asmlinkage __visible void smp_callin(void)
mmgrab(mm);
current->active_mm = mm;
+#ifdef CONFIG_HOTPLUG_PARALLEL
+ cpuhp_ap_sync_alive();
+#endif
+
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
@@ -243,7 +256,9 @@ asmlinkage __visible void smp_callin(void)
*/
local_flush_icache_all();
local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
complete(&cpu_running);
+#endif
/*
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
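
With HOTPLUG_PARALLEL the generic hotplug core owns the boot-CPU/AP handshake,
so the arch only supplies the kick hook and the AP-side sync call. Roughly, the
resulting flow (the core pieces live in kernel/cpu.c; a sketch, not verbatim):

	/*
	 * BP: cpu_up()
	 *       -> core bringup (CPUHP_BP_KICK_AP)
	 *            -> arch_cpuhp_kick_ap_alive()   [this patch]
	 *                 -> start_secondary_cpu()
	 * AP: smp_callin()
	 *       -> cpuhp_ap_sync_alive()   [replaces complete(&cpu_running)]
	 */
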
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 199d13f86f31..0f701ace3bb9 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -109,6 +109,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZAAMO);
EXT_KEY(ZABHA);
EXT_KEY(ZACAS);
+ EXT_KEY(ZALASR);
EXT_KEY(ZALRSC);
EXT_KEY(ZAWRS);
EXT_KEY(ZBA);
@@ -122,6 +123,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZCB);
EXT_KEY(ZCMOP);
EXT_KEY(ZICBOM);
+ EXT_KEY(ZICBOP);
EXT_KEY(ZICBOZ);
EXT_KEY(ZICNTR);
EXT_KEY(ZICOND);
@@ -302,6 +304,11 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
pair->value = riscv_cbom_block_size;
break;
+ case RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOP))
+ pair->value = riscv_cbop_block_size;
+ break;
case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
pair->value = user_max_virt_addr();
break;
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 901e67adf576..3ed071dab9d8 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -66,6 +66,8 @@ void __init riscv_v_setup_ctx_cache(void)
if (!(has_vector() || has_xtheadvector()))
return;
+ update_regset_vector_info(riscv_v_vsize);
+
riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
riscv_v_vsize, 16, SLAB_PANIC,
0, riscv_v_vsize, NULL);
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
index 9408f50ca59a..75bd0abffd63 100644
--- a/arch/riscv/lib/csum.c
+++ b/arch/riscv/lib/csum.c
@@ -40,20 +40,11 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
uproto = (__force unsigned int)htonl(proto);
sum += uproto;
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) {
unsigned long fold_temp;
- /*
- * Zbb is likely available when the kernel is compiled with Zbb
- * support, so nop when Zbb is available and jump when Zbb is
- * not available.
- */
- asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- :
- :
- :
- : no_zbb);
asm(".option push \n\
.option arch,+zbb \n\
rori %[fold_temp], %[sum], 32 \n\
@@ -66,7 +57,7 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
: [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp));
return (__force __sum16)(sum >> 16);
}
-no_zbb:
+
sum += ror64(sum, 32);
sum >>= 32;
return csum_fold((__force __wsum)sum);
@@ -152,21 +143,11 @@ do_csum_with_alignment(const unsigned char *buff, int len)
csum = do_csum_common(ptr, end, data);
#ifdef CC_HAS_ASM_GOTO_TIED_OUTPUT
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) {
unsigned long fold_temp;
- /*
- * Zbb is likely available when the kernel is compiled with Zbb
- * support, so nop when Zbb is available and jump when Zbb is
- * not available.
- */
- asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- :
- :
- :
- : no_zbb);
-
#ifdef CONFIG_32BIT
asm_goto_output(".option push \n\
.option arch,+zbb \n\
@@ -204,7 +185,7 @@ do_csum_with_alignment(const unsigned char *buff, int len)
end:
return csum >> 16;
}
-no_zbb:
+
#endif /* CC_HAS_ASM_GOTO_TIED_OUTPUT */
#ifndef CONFIG_32BIT
csum += ror64(csum, 32);
@@ -234,21 +215,11 @@ do_csum_no_alignment(const unsigned char *buff, int len)
end = (const unsigned long *)(buff + len);
csum = do_csum_common(ptr, end, data);
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB) &&
+ riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) {
unsigned long fold_temp;
- /*
- * Zbb is likely available when the kernel is compiled with Zbb
- * support, so nop when Zbb is available and jump when Zbb is
- * not available.
- */
- asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
- RISCV_ISA_EXT_ZBB, 1)
- :
- :
- :
- : no_zbb);
-
#ifdef CONFIG_32BIT
asm (".option push \n\
.option arch,+zbb \n\
@@ -274,7 +245,7 @@ do_csum_no_alignment(const unsigned char *buff, int len)
#endif /* !CONFIG_32BIT */
return csum >> 16;
}
-no_zbb:
+
#ifndef CONFIG_32BIT
csum += ror64(csum, 32);
csum >>= 32;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index d85efe74a4b6..addb8a9305be 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -441,7 +441,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
{
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
/*
* We do not know which mm the PTE page is associated to at this point.
@@ -526,7 +526,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
{
- struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
/* See comment in alloc_pte_late() regarding NULL passed the ctor */
BUG_ON(!ptdesc || !pagetable_pmd_ctor(NULL, ptdesc));
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 8b6c0a112a8d..807c0a0de182 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,8 +9,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
- asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
- : : : : svvptc);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
+ if (!pte_same(ptep_get(ptep), entry)) {
+ __set_pte_at(vma->vm_mm, ptep, entry);
+ /* Here only not svadu is impacted */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
+ }
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry);
@@ -19,16 +27,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* the case that the PTE changed and the spurious fault case.
*/
return true;
-
-svvptc:
- if (!pte_same(ptep_get(ptep), entry)) {
- __set_pte_at(vma->vm_mm, ptep, entry);
- /* Here only not svadu is impacted */
- flush_tlb_page(vma, address);
- return true;
- }
-
- return false;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,