Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/boot/dts/arm/fvp-base-revc.dts    2
-rw-r--r--  arch/arm64/include/asm/el2_setup.h           4
-rw-r--r--  arch/arm64/kernel/hyp-stub.S                 4
-rw-r--r--  arch/arm64/kernel/signal.c                  48
-rw-r--r--  arch/arm64/kernel/stacktrace.c              32
-rw-r--r--  arch/arm64/kvm/at.c                         11
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/pkvm.c               4
-rw-r--r--  arch/arm64/kvm/sys_regs.c                    3
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c              12
9 files changed, 69 insertions, 51 deletions
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 19973ab4ea6b..9e10d7a6b5a2 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -233,7 +233,7 @@
#interrupt-cells = <0x1>;
compatible = "pci-host-ecam-generic";
device_type = "pci";
- bus-range = <0x0 0x1>;
+ bus-range = <0x0 0xff>;
reg = <0x0 0x40000000 0x0 0x10000000>;
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
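For context (not part of the patch): the widened bus-range simply matches the size of the ECAM window declared in the reg property. A minimal sketch of the arithmetic, assuming the standard ECAM layout of 4 KiB of config space per function:

#include <assert.h>

int main(void)
{
	unsigned long ecam_size = 0x10000000;    /* reg = <... 0x10000000> */
	unsigned long per_bus   = 32 * 8 * 4096; /* 32 devs x 8 fns x 4 KiB = 1 MiB */

	assert(ecam_size / per_bus == 256);      /* buses 0x00..0xff */
	return 0;
}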
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 85ef966c08cd..4ef52d7245bb 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -87,7 +87,7 @@
1 << PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
.Lskip_spe_el2_\@:
- mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ mov x0, #MDCR_EL2_E2PB_MASK
orr x2, x2, x0 // If we don't have VHE, then
// use EL1&0 translation.
@@ -100,7 +100,7 @@
and x0, x0, TRBIDR_EL1_P
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
- mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+ mov x0, #MDCR_EL2_E2TB_MASK
orr x2, x2, x0 // allow the EL1&0 translation
// to own it.
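These hunks (and the matching ones in hyp-stub.S and pkvm.c below) drop the explicit shift because the E2PB/E2TB mask macros are now defined already shifted into their register position; shifting them again would move the bits out of the field. A minimal sketch of the two conventions, assuming the new masks are GENMASK-style definitions (E2PB is a 2-bit field at MDCR_EL2[13:12]):

/* Old convention: raw field width, shifted into place by the caller. */
#define E2PB_SHIFT_OLD	12
#define E2PB_MASK_OLD	0x3UL
#define E2PB_BITS_OLD	(E2PB_MASK_OLD << E2PB_SHIFT_OLD)

/* New convention (assumed): the mask is already positioned. */
#define E2PB_MASK_NEW	(0x3UL << 12)	/* e.g. GENMASK(13, 12) */

/* Same bits either way - which is why only the use sites change. */
_Static_assert(E2PB_BITS_OLD == E2PB_MASK_NEW, "conventions agree");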
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 65f76064c86b..ae990da1eae5 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -114,8 +114,8 @@ SYM_CODE_START_LOCAL(__finalise_el2)
// Use EL2 translations for SPE & TRBE and disable access from EL1
mrs x0, mdcr_el2
- bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
- bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+ bic x0, x0, #MDCR_EL2_E2PB_MASK
+ bic x0, x0, #MDCR_EL2_E2TB_MASK
msr mdcr_el2, x0
// Transfer the MM state from EL1 to EL2
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 14ac6fdb872b..37e24f1bd227 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -1462,10 +1462,33 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
struct rt_sigframe_user_layout *user, int usig)
{
__sigrestore_t sigtramp;
+ int err;
+
+ if (ksig->ka.sa.sa_flags & SA_RESTORER)
+ sigtramp = ksig->ka.sa.sa_restorer;
+ else
+ sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
+
+ err = gcs_signal_entry(sigtramp, ksig);
+ if (err)
+ return err;
+
+ /*
+ * We must not fail from this point onwards. We are going to update
+ * registers, including SP, in order to invoke the signal handler. If
+ * we failed and attempted to deliver a nested SIGSEGV to a handler
+ * after that point, the subsequent sigreturn would end up restoring
+ * the (partial) state for the original signal handler.
+ */
regs->regs[0] = usig;
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ regs->regs[1] = (unsigned long)&user->sigframe->info;
+ regs->regs[2] = (unsigned long)&user->sigframe->uc;
+ }
regs->sp = (unsigned long)user->sigframe;
regs->regs[29] = (unsigned long)&user->next_frame->fp;
+ regs->regs[30] = (unsigned long)sigtramp;
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
/*
@@ -1506,14 +1529,7 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
sme_smstop();
}
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- sigtramp = ksig->ka.sa.sa_restorer;
- else
- sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
-
- regs->regs[30] = (unsigned long)sigtramp;
-
- return gcs_signal_entry(sigtramp, ksig);
+ return 0;
}
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
@@ -1537,14 +1553,16 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(&user, regs, set, &ua_state);
- if (err == 0) {
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ if (err == 0)
err = setup_return(regs, ksig, &user, usig);
- if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- regs->regs[1] = (unsigned long)&frame->info;
- regs->regs[2] = (unsigned long)&frame->uc;
- }
- }
+
+ /*
+ * We must not fail if setup_return() succeeded - see comment at the
+ * beginning of setup_return().
+ */
if (err == 0)
set_handler_user_access_state();
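The reordering in setup_return()/setup_rt_frame() establishes a point of no return: everything that can fail (writing the siginfo, the GCS entry, resolving the restorer) now happens before any register is touched. A minimal sketch of the pattern, using hypothetical helpers rather than the actual kernel functions:

static int deliver_signal(struct pt_regs *regs, struct ksignal *ksig)
{
	int err = 0;

	err |= write_frame_to_user(ksig);	/* may fault on user memory */
	err |= write_siginfo_to_user(ksig);	/* may fault on user memory */
	if (err)
		return err;			/* nothing has been clobbered yet */

	/*
	 * Point of no return: only infallible register updates below, so a
	 * later failure can never leave a half-built frame behind for
	 * sigreturn to restore.
	 */
	set_handler_registers(regs, ksig);
	return 0;
}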
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index caef85462acb..1d9d51d7627f 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -26,7 +26,6 @@ enum kunwind_source {
KUNWIND_SOURCE_CALLER,
KUNWIND_SOURCE_TASK,
KUNWIND_SOURCE_REGS_PC,
- KUNWIND_SOURCE_REGS_LR,
};
union unwind_flags {
@@ -138,8 +137,10 @@ kunwind_recover_return_address(struct kunwind_state *state)
orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->common.pc,
(void *)state->common.fp);
- if (WARN_ON_ONCE(state->common.pc == orig_pc))
+ if (state->common.pc == orig_pc) {
+ WARN_ON_ONCE(state->task == current);
return -EINVAL;
+ }
state->common.pc = orig_pc;
state->flags.fgraph = 1;
}
@@ -178,23 +179,8 @@ int kunwind_next_regs_pc(struct kunwind_state *state)
state->regs = regs;
state->common.pc = regs->pc;
state->common.fp = regs->regs[29];
- state->source = KUNWIND_SOURCE_REGS_PC;
- return 0;
-}
-
-static __always_inline int
-kunwind_next_regs_lr(struct kunwind_state *state)
-{
- /*
- * The stack for the regs was consumed by kunwind_next_regs_pc(), so we
- * cannot consume that again here, but we know the regs are safe to
- * access.
- */
- state->common.pc = state->regs->regs[30];
- state->common.fp = state->regs->regs[29];
state->regs = NULL;
- state->source = KUNWIND_SOURCE_REGS_LR;
-
+ state->source = KUNWIND_SOURCE_REGS_PC;
return 0;
}
@@ -215,12 +201,12 @@ kunwind_next_frame_record_meta(struct kunwind_state *state)
case FRAME_META_TYPE_FINAL:
if (meta == &task_pt_regs(tsk)->stackframe)
return -ENOENT;
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(tsk == current);
return -EINVAL;
case FRAME_META_TYPE_PT_REGS:
return kunwind_next_regs_pc(state);
default:
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(tsk == current);
return -EINVAL;
}
}
@@ -274,11 +260,8 @@ kunwind_next(struct kunwind_state *state)
case KUNWIND_SOURCE_FRAME:
case KUNWIND_SOURCE_CALLER:
case KUNWIND_SOURCE_TASK:
- case KUNWIND_SOURCE_REGS_LR:
- err = kunwind_next_frame_record(state);
- break;
case KUNWIND_SOURCE_REGS_PC:
- err = kunwind_next_regs_lr(state);
+ err = kunwind_next_frame_record(state);
break;
default:
err = -EINVAL;
@@ -436,7 +419,6 @@ static const char *state_source_string(const struct kunwind_state *state)
case KUNWIND_SOURCE_CALLER: return "C";
case KUNWIND_SOURCE_TASK: return "T";
case KUNWIND_SOURCE_REGS_PC: return "P";
- case KUNWIND_SOURCE_REGS_LR: return "L";
default: return "U";
}
}
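With KUNWIND_SOURCE_REGS_LR gone, a pt_regs record is consumed in a single step: kunwind_next_regs_pc() reports regs->pc and leaves the frame pointer at regs->regs[29], so the next iteration walks the ordinary frame-record chain and recovers the LR from there. The WARN_ON_ONCE() changes restrict warnings to unwinds of the current task, where inconsistent records indicate a real bug rather than a race with a live remote task. A rough sketch of the single pt_regs step, using simplified stand-in types rather than the kernel's structures:

struct sketch_regs  { unsigned long pc, fp; };	/* stand-in for pt_regs */
struct sketch_state { unsigned long pc, fp; };	/* stand-in for kunwind_state */

static void step_through_pt_regs(struct sketch_state *s,
				 const struct sketch_regs *regs)
{
	s->pc = regs->pc;	/* reported as this frame's return address */
	s->fp = regs->fp;	/* next step follows the frame records again */
}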
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 8c5d7990e5b3..3d7eb395e33d 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -739,8 +739,15 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
final_attr = s1_parattr;
break;
default:
- /* MemAttr[2]=0, Device from S2 */
- final_attr = s2_memattr & GENMASK(1,0) << 2;
+ /*
+ * MemAttr[2]=0, Device from S2.
+ *
+ * FWB does not influence the way that stage 1
+ * memory types and attributes are combined
+ * with stage 2 Device type and attributes.
+ */
+ final_attr = min(s2_memattr_to_attr(s2_memattr),
+ s1_parattr);
}
} else {
/* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
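Two things change here: the old expression parsed as s2_memattr & (GENMASK(1,0) << 2), since << binds tighter than & in C, and it ignored the stage-1 attributes entirely. The fix converts the stage-2 MemAttr to a PAR-style attribute and combines it with the stage-1 result by taking the more restrictive of the two; for Device memory the stricter type has the smaller encoding, so min() does the combining. A minimal sketch of that, treating the exact mapping performed by s2_memattr_to_attr() as an assumption since it is not shown in this hunk:

/* Standard MAIR/PAR Device attribute encodings. */
#define ATTR_DEVICE_nGnRnE	0x00	/* most restrictive */
#define ATTR_DEVICE_nGnRE	0x04
#define ATTR_DEVICE_nGRE	0x08
#define ATTR_DEVICE_GRE		0x0c	/* least restrictive */

static unsigned int combine_device_attrs(unsigned int s1_attr,
					 unsigned int s2_attr)
{
	/* Lower encoding == stronger restrictions, so min() wins. */
	return s1_attr < s2_attr ? s1_attr : s2_attr;
}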
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 01616c39a810..071993c16de8 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -126,7 +126,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
/* Trap SPE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPMS;
- mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+ mdcr_clear |= MDCR_EL2_E2PB_MASK;
}
/* Trap Trace Filter */
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
/* Trap External Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
- mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+ mdcr_clear |= MDCR_EL2_E2TB_MASK;
vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 83c6b4a07ef5..e2a5c2918d9e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2618,7 +2618,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
ID_AA64MMFR0_EL1_TGRAN4_2 |
ID_AA64MMFR0_EL1_TGRAN64_2 |
- ID_AA64MMFR0_EL1_TGRAN16_2)),
+ ID_AA64MMFR0_EL1_TGRAN16_2 |
+ ID_AA64MMFR0_EL1_ASIDBITS)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
ID_AA64MMFR1_EL1_HCX |
ID_AA64MMFR1_EL1_TWED |
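ID_WRITABLE() takes the mask of fields userspace may override, so OR-ing ID_AA64MMFR0_EL1_ASIDBITS into the complemented expression removes ASIDBITS from the writable set. A minimal sketch (not the KVM implementation) of how such a mask gates a userspace write:

#include <stdbool.h>

static bool id_reg_write_ok(unsigned long cur, unsigned long new,
			    unsigned long writable_mask)
{
	/* Changing any bit outside writable_mask is rejected. */
	return ((cur ^ new) & ~writable_mask) == 0;
}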
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index f4c4494645c3..fb96802799c6 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -608,12 +608,22 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);
+ old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
+
+ /*
+ * Put the reference taken on @irq if the store fails. Intentionally do
+ * not return the error as the translation cache is best effort.
+ */
+ if (xa_is_err(old)) {
+ vgic_put_irq(kvm, irq);
+ return;
+ }
+
/*
* We could have raced with another CPU caching the same
* translation behind our back, ensure we don't leak a
* reference if that is the case.
*/
- old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
if (old)
vgic_put_irq(kvm, old);
}
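xa_store() returns the previous entry on success, or an error encoded as a pointer when the store cannot be performed (e.g. allocation failure); xa_is_err() detects that case. Checking it before treating the return value as an old cache entry lets the code drop the reference that was taken for the failed insertion. A minimal sketch of the same pattern around a generic refcounted object; the xarray calls are the real API, the object type and helpers are hypothetical:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct my_obj { refcount_t ref; };

static void my_obj_get(struct my_obj *o) { refcount_inc(&o->ref); }
static void my_obj_put(struct my_obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}

static void cache_insert(struct xarray *xa, unsigned long key,
			 struct my_obj *obj)
{
	struct my_obj *old;

	my_obj_get(obj);			/* reference held by the cache */

	old = xa_store(xa, key, obj, GFP_KERNEL);
	if (xa_is_err(old)) {			/* e.g. -ENOMEM */
		my_obj_put(obj);		/* undo the reference */
		return;
	}

	if (old)				/* raced with another insert */
		my_obj_put(old);
}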