Skip to content

Commit b06bed7

Browse files
committed
arm64: debug: split single stepping exception entry
JIRA: https://issues.redhat.com/browse/RHEL-65658 commit 0ac7584 Author: Ada Couprie Diaz <ada.coupriediaz@arm.com> Date: Mon Jul 7 12:41:05 2025 +0100 arm64: debug: split single stepping exception entry Currently all debug exceptions share common entry code and are routed to `do_debug_exception()`, which calls dynamically-registered handlers for each specific debug exception. This is unfortunate as different debug exceptions have different entry handling requirements, and it would be better to handle these distinct requirements earlier. The single stepping exception has the most constraints: it can be exploited to train branch predictors and it needs special handling at EL1 for the Cortex-A76 erratum #1463225. We need to conserve all those mitigations. However, it does not write an address at FAR_EL1, as only hardware watchpoints do so. The single-step handler does its own signaling if it needs to and only returns 0, so we can call it directly from `entry-common.c`. Split the single stepping exception entry, adjust the function signature, keep the security mitigation and erratum handling. Further, as the EL0 and EL1 code paths are cleanly separated, we can split `do_softstep()` into `do_el0_softstep()` and `do_el1_softstep()` and call them directly from the relevant entry paths. We can also remove `NOKPROBE_SYMBOL` for the EL0 path, as it cannot lead to a kprobe recursion. Move the call to `arm64_apply_bp_hardening()` to `entry-common.c` so that we can do it as early as possible, and only for the exceptions coming from EL0, where it is needed. This is safe to do as it is `noinstr`, as are all the functions it may call. `el0_ia()` and `el0_pc()` already call it this way. When taking a soft-step exception from EL0, most of the single stepping handling is safely preemptible: the only possible handler is `uprobe_single_step_handler()`. 
It only operates on task-local data and properly checks its validity, then raises a Thread Information Flag, processed before returning to userspace in `do_notify_resume()`, which is already preemptible. However, the soft-step handler first calls `reinstall_suspended_bps()` to check if there is any hardware breakpoint or watchpoint pending or already stepped through. This cannot be preempted as it manipulates the hardware breakpoint and watchpoint registers. Move the call to `try_step_suspended_breakpoints()` to `entry-common.c` and adjust the relevant comments. We can now safely unmask interrupts before handling the step itself, fixing a PREEMPT_RT issue where the handler could call a sleeping function with preemption disabled. Signed-off-by: Ada Couprie Diaz <ada.coupriediaz@arm.com> Closes: https://lore.kernel.org/linux-arm-kernel/Z6YW_Kx4S2tmj2BP@uudg.org/ Tested-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com> Reviewed-by: Will Deacon <will@kernel.org> Acked-by: Mark Rutland <mark.rutland@arm.com> Link: https://lore.kernel.org/r/20250707114109.35672-10-ada.coupriediaz@arm.com Signed-off-by: Will Deacon <will@kernel.org> Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
1 parent 3cb3027 commit b06bed7

File tree

4 files changed

+73
-47
lines changed

4 files changed

+73
-47
lines changed

arch/arm64/include/asm/exception.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,8 @@ void do_breakpoint(unsigned long esr, struct pt_regs *regs);
6464
#else
6565
static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
6666
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
67+
void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
68+
void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
6769
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
6870
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
6971
void do_sme_acc(unsigned long esr, struct pt_regs *regs);

arch/arm64/kernel/debug-monitors.c

Lines changed: 27 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include <asm/cputype.h>
2222
#include <asm/daifflags.h>
2323
#include <asm/debug-monitors.h>
24+
#include <asm/exception.h>
2425
#include <asm/kgdb.h>
2526
#include <asm/kprobes.h>
2627
#include <asm/system_misc.h>
@@ -159,21 +160,6 @@ NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
159160
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
160161
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
161162

162-
/*
163-
* Call single step handlers
164-
* There is no Syndrome info to check for determining the handler.
165-
* However, there is only one possible handler for user and kernel modes, so
166-
* check and call the appropriate one.
167-
*/
168-
static int call_step_hook(struct pt_regs *regs, unsigned long esr)
169-
{
170-
if (user_mode(regs))
171-
return uprobe_single_step_handler(regs, esr);
172-
173-
return kgdb_single_step_handler(regs, esr);
174-
}
175-
NOKPROBE_SYMBOL(call_step_hook);
176-
177163
static void send_user_sigtrap(int si_code)
178164
{
179165
struct pt_regs *regs = current_pt_regs();
@@ -188,41 +174,38 @@ static void send_user_sigtrap(int si_code)
188174
"User debug trap");
189175
}
190176

191-
static int single_step_handler(unsigned long unused, unsigned long esr,
192-
struct pt_regs *regs)
177+
/*
178+
* We have already unmasked interrupts and enabled preemption
179+
* when calling do_el0_softstep() from entry-common.c.
180+
*/
181+
void do_el0_softstep(unsigned long esr, struct pt_regs *regs)
193182
{
183+
if (uprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
184+
return;
185+
186+
send_user_sigtrap(TRAP_TRACE);
194187
/*
195-
* If we are stepping a pending breakpoint, call the hw_breakpoint
196-
* handler first.
188+
* ptrace will disable single step unless explicitly
189+
* asked to re-enable it. For other clients, it makes
190+
* sense to leave it enabled (i.e. rewind the controls
191+
* to the active-not-pending state).
197192
*/
198-
if (try_step_suspended_breakpoints(regs))
199-
return 0;
200-
201-
if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
202-
return 0;
193+
user_rewind_single_step(current);
194+
}
203195

204-
if (user_mode(regs)) {
205-
send_user_sigtrap(TRAP_TRACE);
206-
207-
/*
208-
* ptrace will disable single step unless explicitly
209-
* asked to re-enable it. For other clients, it makes
210-
* sense to leave it enabled (i.e. rewind the controls
211-
* to the active-not-pending state).
212-
*/
213-
user_rewind_single_step(current);
214-
} else {
215-
pr_warn("Unexpected kernel single-step exception at EL1\n");
216-
/*
217-
* Re-enable stepping since we know that we will be
218-
* returning to regs.
219-
*/
220-
set_regs_spsr_ss(regs);
221-
}
196+
void do_el1_softstep(unsigned long esr, struct pt_regs *regs)
197+
{
198+
if (kgdb_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
199+
return;
222200

223-
return 0;
201+
pr_warn("Unexpected kernel single-step exception at EL1\n");
202+
/*
203+
* Re-enable stepping since we know that we will be
204+
* returning to regs.
205+
*/
206+
set_regs_spsr_ss(regs);
224207
}
225-
NOKPROBE_SYMBOL(single_step_handler);
208+
NOKPROBE_SYMBOL(do_el1_softstep);
226209

227210
static int call_break_hook(struct pt_regs *regs, unsigned long esr)
228211
{
@@ -326,8 +309,6 @@ NOKPROBE_SYMBOL(try_handle_aarch32_break);
326309

327310
void __init debug_traps_init(void)
328311
{
329-
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
330-
TRAP_TRACE, "single-step handler");
331312
hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
332313
TRAP_BRKPT, "BRK handler");
333314
}

arch/arm64/kernel/entry-common.c

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -494,6 +494,24 @@ static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
494494
arm64_exit_el1_dbg(regs);
495495
}
496496

497+
static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
498+
{
499+
arm64_enter_el1_dbg(regs);
500+
if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
501+
debug_exception_enter(regs);
502+
/*
503+
* After handling a breakpoint, we suspend the breakpoint
504+
* and use single-step to move to the next instruction.
505+
* If we are stepping a suspended breakpoint there's nothing more to do:
506+
* the single-step is complete.
507+
*/
508+
if (!try_step_suspended_breakpoints(regs))
509+
do_el1_softstep(esr, regs);
510+
debug_exception_exit(regs);
511+
}
512+
arm64_exit_el1_dbg(regs);
513+
}
514+
497515
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
498516
{
499517
unsigned long far = read_sysreg(far_el1);
@@ -540,6 +558,8 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
540558
el1_breakpt(regs, esr);
541559
break;
542560
case ESR_ELx_EC_SOFTSTP_CUR:
561+
el1_softstp(regs, esr);
562+
break;
543563
case ESR_ELx_EC_WATCHPT_CUR:
544564
case ESR_ELx_EC_BRK64:
545565
el1_dbg(regs, esr);
@@ -738,6 +758,25 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
738758
exit_to_user_mode(regs);
739759
}
740760

761+
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
762+
{
763+
if (!is_ttbr0_addr(regs->pc))
764+
arm64_apply_bp_hardening();
765+
766+
enter_from_user_mode(regs);
767+
/*
768+
* After handling a breakpoint, we suspend the breakpoint
769+
* and use single-step to move to the next instruction.
770+
* If we are stepping a suspended breakpoint there's nothing more to do:
771+
* the single-step is complete.
772+
*/
773+
if (!try_step_suspended_breakpoints(regs)) {
774+
local_daif_restore(DAIF_PROCCTX);
775+
do_el0_softstep(esr, regs);
776+
}
777+
exit_to_user_mode(regs);
778+
}
779+
741780
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
742781
{
743782
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
@@ -816,6 +855,8 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
816855
el0_breakpt(regs, esr);
817856
break;
818857
case ESR_ELx_EC_SOFTSTP_LOW:
858+
el0_softstp(regs, esr);
859+
break;
819860
case ESR_ELx_EC_WATCHPT_LOW:
820861
case ESR_ELx_EC_BRK64:
821862
el0_dbg(regs, esr);
@@ -938,6 +979,8 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
938979
el0_breakpt(regs, esr);
939980
break;
940981
case ESR_ELx_EC_SOFTSTP_LOW:
982+
el0_softstp(regs, esr);
983+
break;
941984
case ESR_ELx_EC_WATCHPT_LOW:
942985
case ESR_ELx_EC_BKPT32:
943986
el0_dbg(regs, esr);

arch/arm64/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -854,7 +854,7 @@ bool try_step_suspended_breakpoints(struct pt_regs *regs)
854854
bool handled_exception = false;
855855

856856
/*
857-
* Called from single-step exception handler.
857+
* Called from single-step exception entry.
858858
* Return true if we stepped a breakpoint and can resume execution,
859859
* false if we need to handle a single-step.
860860
*/

0 commit comments

Comments (0)