Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--   arch/riscv/kernel/entry.S            |  21
-rw-r--r--   arch/riscv/kernel/module.c           |  18
-rw-r--r--   arch/riscv/kernel/probes/kprobes.c   |   2
-rw-r--r--   arch/riscv/kernel/stacktrace.c       |   4
-rw-r--r--   arch/riscv/kernel/traps.c            |   6
5 files changed, 22 insertions, 29 deletions
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index c200d329d4bd..33a5a9f2a0d4 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -23,21 +23,21 @@
 	REG_S	a0, TASK_TI_A0(tp)
 	csrr	a0, CSR_CAUSE
 	/* Exclude IRQs */
-	blt	a0, zero, _new_vmalloc_restore_context_a0
+	blt	a0, zero, .Lnew_vmalloc_restore_context_a0
 
 	REG_S	a1, TASK_TI_A1(tp)
 	/* Only check new_vmalloc if we are in page/protection fault */
 	li	a1, EXC_LOAD_PAGE_FAULT
-	beq	a0, a1, _new_vmalloc_kernel_address
+	beq	a0, a1, .Lnew_vmalloc_kernel_address
 	li	a1, EXC_STORE_PAGE_FAULT
-	beq	a0, a1, _new_vmalloc_kernel_address
+	beq	a0, a1, .Lnew_vmalloc_kernel_address
 	li	a1, EXC_INST_PAGE_FAULT
-	bne	a0, a1, _new_vmalloc_restore_context_a1
+	bne	a0, a1, .Lnew_vmalloc_restore_context_a1
 
-_new_vmalloc_kernel_address:
+.Lnew_vmalloc_kernel_address:
 	/* Is it a kernel address? */
 	csrr	a0, CSR_TVAL
-	bge	a0, zero, _new_vmalloc_restore_context_a1
+	bge	a0, zero, .Lnew_vmalloc_restore_context_a1
 
 	/* Check if a new vmalloc mapping appeared that could explain the trap */
 	REG_S	a2, TASK_TI_A2(tp)
@@ -69,7 +69,7 @@ _new_vmalloc_kernel_address:
 	/* Check the value of new_vmalloc for this cpu */
 	REG_L	a2, 0(a0)
 	and	a2, a2, a1
-	beq	a2, zero, _new_vmalloc_restore_context
+	beq	a2, zero, .Lnew_vmalloc_restore_context
 
 	/* Atomically reset the current cpu bit in new_vmalloc */
 	amoxor.d	a0, a1, (a0)
@@ -83,11 +83,11 @@ _new_vmalloc_kernel_address:
 	csrw	CSR_SCRATCH, x0
 	sret
 
-_new_vmalloc_restore_context:
+.Lnew_vmalloc_restore_context:
 	REG_L	a2, TASK_TI_A2(tp)
-_new_vmalloc_restore_context_a1:
+.Lnew_vmalloc_restore_context_a1:
 	REG_L	a1, TASK_TI_A1(tp)
-_new_vmalloc_restore_context_a0:
+.Lnew_vmalloc_restore_context_a0:
 	REG_L	a0, TASK_TI_A0(tp)
 .endm
 
@@ -278,6 +278,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
 #else
 	sret
 #endif
+SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
 SYM_CODE_END(ret_from_exception)
 ASM_NOKPROBE(ret_from_exception)
 
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1cd461f3d872..47d0ebeec93c 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -23,7 +23,7 @@ struct used_bucket {
 
 struct relocation_head {
 	struct hlist_node node;
-	struct list_head *rel_entry;
+	struct list_head rel_entry;
 	void *location;
 };
 
@@ -634,7 +634,7 @@ process_accumulated_relocations(struct module *me,
 		location = rel_head_iter->location;
 		list_for_each_entry_safe(rel_entry_iter,
 					 rel_entry_iter_tmp,
-					 rel_head_iter->rel_entry,
+					 &rel_head_iter->rel_entry,
 					 head) {
 			curr_type = rel_entry_iter->type;
 			reloc_handlers[curr_type].reloc_handler(
@@ -704,16 +704,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
 			return -ENOMEM;
 		}
 
-		rel_head->rel_entry =
-			kmalloc(sizeof(struct list_head), GFP_KERNEL);
-
-		if (!rel_head->rel_entry) {
-			kfree(entry);
-			kfree(rel_head);
-			return -ENOMEM;
-		}
-
-		INIT_LIST_HEAD(rel_head->rel_entry);
+		INIT_LIST_HEAD(&rel_head->rel_entry);
 		rel_head->location = location;
 		INIT_HLIST_NODE(&rel_head->node);
 		if (!current_head->first) {
@@ -722,7 +713,6 @@ static int add_relocation_to_accumulate(struct module *me, int type,
 
 		if (!bucket) {
 			kfree(entry);
-			kfree(rel_head->rel_entry);
 			kfree(rel_head);
 			return -ENOMEM;
 		}
@@ -735,7 +725,7 @@ static int add_relocation_to_accumulate(struct module *me, int type,
 	}
 
 	/* Add relocation to head of discovered rel_head */
-	list_add_tail(&entry->head, rel_head->rel_entry);
+	list_add_tail(&entry->head, &rel_head->rel_entry);
 
 	return 0;
 }
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index 380a0e8cecc0..c0738d6c6498 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -30,7 +30,7 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 	p->ainsn.api.restore = (unsigned long)p->addr + len;
 
 	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
-	patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
+	patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
 }
 
 static void __kprobes arch_prepare_simulate(struct kprobe *p)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 153a2db4c5fa..d4355c770c36 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -17,6 +17,7 @@
 #ifdef CONFIG_FRAME_POINTER
 
 extern asmlinkage void handle_exception(void);
+extern unsigned long ret_from_exception_end;
 
 static inline int fp_is_valid(unsigned long fp, unsigned long sp)
 {
@@ -71,7 +72,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 		fp = frame->fp;
 		pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
 					   &frame->ra);
-		if (pc == (unsigned long)handle_exception) {
+		if (pc >= (unsigned long)handle_exception &&
+		    pc < (unsigned long)&ret_from_exception_end) {
 			if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
 				break;
 
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 51ebfd23e007..8ff8e8b36524 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -35,7 +35,7 @@
 
 int show_unhandled_signals = 1;
 
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
 
 static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
 {
@@ -81,7 +81,7 @@ void die(struct pt_regs *regs, const char *str)
 
 	oops_enter();
 
-	spin_lock_irqsave(&die_lock, flags);
+	raw_spin_lock_irqsave(&die_lock, flags);
 	console_verbose();
 	bust_spinlocks(1);
 
@@ -100,7 +100,7 @@ void die(struct pt_regs *regs, const char *str)
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 
-	spin_unlock_irqrestore(&die_lock, flags);
+	raw_spin_unlock_irqrestore(&die_lock, flags);
 	oops_exit();
 
 	if (in_interrupt())