treewide: context_tracking: Rename CONTEXT_* into CT_STATE_*

JIRA: https://issues.redhat.com/browse/RHEL-79879

Conflicts:
 - arch/powerpc/include/asm/interrupt.h (interrupt_enter_prepare)
   Context change due to missing commit f7bff6e7759b1a ("powerpc/64/interrupt:
   avoid BUG/WARN recursion in interrupt entry")

 - arch/powerpc/kernel/interrupt.c (interrupt_exit_kernel_prepare)
   Context change due to missing commit dc398a084d459f ("powerpc/64s/interrupt:
   Perf NMI should not take normal exit path")

 - arch/powerpc/kernel/interrupt.c (system_call_exception)
   Context change due to missing commit 1547db7d1f4481 ("powerpc: Move
   system_call_exception() to syscall.c")
   Upstream commit patches syscall.c.

 - include/linux/context_tracking.h (context_tracking_guest_exit)
   Context change due to a future commit 593377036e50de ("kvm: Note an RCU
   quiescent state on guest exit").

 - kernel/entry/common.c (__enter_from_user_mode)
   Context change due to missing commit caf4062e35b21c ("entry: Move
   enter_from_user_mode() to header file")
   Upstream commit patches include/linux/entry-common.h.

 - kernel/context_tracking.c (__ct_user_enter)
   Context change due to missing commit 0f613bfa8268a8 ("locking/atomic:
   treewide: use raw_atomic*_<op>()")

commit d65d411c9259a2499081e1e7ed91088232666b57
Author: Valentin Schneider <vschneid@redhat.com>
Date:   Tue Jul 25 12:08:50 2023 +0100

	Context tracking state related symbols currently use a mix of the
	CONTEXT_ (e.g. CONTEXT_KERNEL) and CT_STATE_ (e.g. CT_STATE_MASK) prefixes.

	Clean up the naming and make the ctx_state enum use the CT_STATE_ prefix.

	Suggested-by: Frederic Weisbecker <frederic@kernel.org>
	Signed-off-by: Valentin Schneider <vschneid@redhat.com>
	Acked-by: Frederic Weisbecker <frederic@kernel.org>
	Acked-by: Thomas Gleixner <tglx@linutronix.de>
	Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>

Signed-off-by: Čestmír Kalina <ckalina@redhat.com>
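
For illustration (not part of the patch): after the rename, call sites keep
the same context-tracking APIs and only the state constants change, e.g.:

	/* entry path: assert we are coming from user context */
	CT_WARN_ON(ct_state() != CT_STATE_USER);	/* was CONTEXT_USER */
	user_exit_irqoff();

	/* syscall exit path: assert we are still in kernel context */
	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);	/* was CONTEXT_KERNEL */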
 10 files changed, 38 insertions(+), 38 deletions(-)

--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -855,7 +855,7 @@ config HAVE_CONTEXT_TRACKING_USER_OFFSTACK
 	  Architecture neither relies on exception_enter()/exception_exit()
 	  nor on schedule_user(). Also preempt_schedule_notrace() and
 	  preempt_schedule_irq() can't be called in a preemptible section
-	  while context tracking is CONTEXT_USER. This feature reflects a sane
+	  while context tracking is CT_STATE_USER. This feature reflects a sane
 	  entry implementation where the following requirements are met on
 	  critical entry code, ie: before user_exit() or after user_enter():

--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -103,7 +103,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 static __always_inline void __enter_from_user_mode(void)
 {
 	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	CT_WARN_ON(ct_state() != CT_STATE_USER);
 	user_exit_irqoff();
 	trace_hardirqs_off_finish();
 	mte_disable_tco_entry(current);

--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -160,7 +160,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
 
 	if (user_mode(regs)) {
 		kuap_lock();
-		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		CT_WARN_ON(ct_state() != CT_STATE_USER);
 		user_exit_irqoff();
 
 		account_cpu_user_entry();
@@ -172,8 +172,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
-			CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
-				   ct_state() != CONTEXT_IDLE);
+			CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
+				   ct_state() != CT_STATE_IDLE);
			if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
				BUG_ON(is_implicit_soft_masked(regs));
		}

--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -93,7 +93,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	trace_hardirqs_off(); /* finish reconciling */
 
-	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+	CT_WARN_ON(ct_state() == CT_STATE_KERNEL);
 	user_exit_irqoff();
 
 	BUG_ON(regs_is_unrecoverable(regs));
@@ -417,7 +417,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	unsigned long ret = 0;
 	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
 
-	CT_WARN_ON(ct_state() == CONTEXT_USER);
+	CT_WARN_ON(ct_state() == CT_STATE_USER);
 
 	kuap_assert_locked();
@@ -495,7 +495,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
 	BUG_ON(regs_is_unrecoverable(regs));
 	BUG_ON(arch_irq_disabled_regs(regs));
-	CT_WARN_ON(ct_state() == CONTEXT_USER);
+	CT_WARN_ON(ct_state() == CT_STATE_USER);
 
 	/*
 	 * We don't need to restore AMR on the way back to userspace for KUAP.
@@ -531,7 +531,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	 * so avoid recursion.
 	 */
 	if (TRAP(regs) != INTERRUPT_PROGRAM)
-		CT_WARN_ON(ct_state() == CONTEXT_USER);
+		CT_WARN_ON(ct_state() == CT_STATE_USER);
 
 	kuap = kuap_get_and_assert_locked();

--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -150,7 +150,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline);
 #endif
 
 /*
- * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
+ * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
  */
 static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
 {

--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
 static inline void user_enter(void)
 {
 	if (context_tracking_enabled())
-		ct_user_enter(CONTEXT_USER);
+		ct_user_enter(CT_STATE_USER);
 }
 
 static inline void user_exit(void)
 {
 	if (context_tracking_enabled())
-		ct_user_exit(CONTEXT_USER);
+		ct_user_exit(CT_STATE_USER);
 }
 
 /* Called with interrupts disabled. */
 static __always_inline void user_enter_irqoff(void)
 {
 	if (context_tracking_enabled())
-		__ct_user_enter(CONTEXT_USER);
+		__ct_user_enter(CT_STATE_USER);
 }
 
 static __always_inline void user_exit_irqoff(void)
 {
 	if (context_tracking_enabled())
-		__ct_user_exit(CONTEXT_USER);
+		__ct_user_exit(CT_STATE_USER);
 }
 
 static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
 		return 0;
 
 	prev_ctx = __ct_state();
-	if (prev_ctx != CONTEXT_KERNEL)
+	if (prev_ctx != CT_STATE_KERNEL)
 		ct_user_exit(prev_ctx);
 
 	return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 {
 	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
 	    context_tracking_enabled()) {
-		if (prev_ctx != CONTEXT_KERNEL)
+		if (prev_ctx != CT_STATE_KERNEL)
 			ct_user_enter(prev_ctx);
 	}
 }
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static __always_inline bool context_tracking_guest_enter(void)
 {
 	if (context_tracking_enabled())
-		__ct_user_enter(CONTEXT_GUEST);
+		__ct_user_enter(CT_STATE_GUEST);
 
 	return context_tracking_enabled_this_cpu();
 }
@@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void)
 static __always_inline bool context_tracking_guest_exit(void)
 {
 	if (context_tracking_enabled())
-		__ct_user_exit(CONTEXT_GUEST);
+		__ct_user_exit(CT_STATE_GUEST);
 
 	return context_tracking_enabled_this_cpu();
 }

--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -10,18 +10,18 @@
 #define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)
 
 enum ctx_state {
-	CONTEXT_DISABLED	= -1,	/* returned by ct_state() if unknown */
-	CONTEXT_KERNEL		= 0,
-	CONTEXT_IDLE		= 1,
-	CONTEXT_USER		= 2,
-	CONTEXT_GUEST		= 3,
-	CONTEXT_MAX		= 4,
+	CT_STATE_DISABLED	= -1,	/* returned by ct_state() if unknown */
+	CT_STATE_KERNEL		= 0,
+	CT_STATE_IDLE		= 1,
+	CT_STATE_USER		= 2,
+	CT_STATE_GUEST		= 3,
+	CT_STATE_MAX		= 4,
 };
 
 /* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
+#define RCU_DYNTICKS_IDX CT_STATE_MAX
 
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_STATE_MASK (CT_STATE_MAX - 1)
 #define CT_DYNTICKS_MASK (~CT_STATE_MASK)
 
 struct context_tracking {
@@ -121,14 +121,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
  *
  * Returns the current cpu's context tracking state if context tracking
  * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
  */
 static __always_inline int ct_state(void)
 {
 	int ret;
 
 	if (!context_tracking_enabled())
-		return CONTEXT_DISABLED;
+		return CT_STATE_DISABLED;
 
 	preempt_disable();
 	ret = __ct_state();

--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
 void noinstr ct_idle_enter(void)
 {
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CONTEXT_IDLE);
+	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CT_STATE_IDLE);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
 	unsigned long flags;
 
 	raw_local_irq_save(flags);
-	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE);
+	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CT_STATE_IDLE);
 	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
@@ -483,7 +483,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
			 * user_exit() or ct_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
-			if (state == CONTEXT_USER) {
+			if (state == CT_STATE_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
@@ -617,7 +617,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
			 * run a RCU read side critical section anytime.
			 */
			ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
-			if (state == CONTEXT_USER) {
+			if (state == CT_STATE_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
			 * In this we case we don't care about any concurrency/ordering.
			 */
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+				arch_atomic_set(&ct->state, CT_STATE_KERNEL);
		} else {
			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
				/* Tracking for vtime only, no concurrent RCU EQS accounting */
-				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+				arch_atomic_set(&ct->state, CT_STATE_KERNEL);
			} else {
				/*
				 * Tracking for vtime and RCU EQS. Make sure we don't race

--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -21,7 +21,7 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
 	arch_enter_from_user_mode(regs);
 	lockdep_hardirqs_off(CALLER_ADDR0);
 
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	CT_WARN_ON(ct_state() != CT_STATE_USER);
 	user_exit_irqoff();
 
 	instrumentation_begin();
@@ -260,7 +260,7 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
 	unsigned long nr = syscall_get_nr(current, regs);
 
-	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
 
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5803,7 +5803,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 		preempt_count_set(PREEMPT_DISABLED);
 	}
 	rcu_sleep_check();
-	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
+	SCHED_WARN_ON(ct_state() == CT_STATE_USER);
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -6699,7 +6699,7 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * we find a better solution.
 	 *
 	 * NB: There are buggy callers of this function. Ideally we
-	 * should warn if prev_state != CONTEXT_USER, but that will trigger
+	 * should warn if prev_state != CT_STATE_USER, but that will trigger
 	 * too frequently to make sense yet.
 	 */
 	enum ctx_state prev_state = exception_enter();
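
Note on the state layout (a minimal userspace sketch, not kernel code): with
CT_STATE_MAX = 4, the low two bits of ct->state hold the ctx_state value and
the RCU dynticks counter sits above them, advancing in steps of
RCU_DYNTICKS_IDX, exactly as the CT_STATE_MASK/CT_DYNTICKS_MASK definitions
above encode:

	#include <stdio.h>

	enum ctx_state {
		CT_STATE_DISABLED = -1,
		CT_STATE_KERNEL   = 0,
		CT_STATE_IDLE     = 1,
		CT_STATE_USER     = 2,
		CT_STATE_GUEST    = 3,
		CT_STATE_MAX      = 4,
	};

	#define RCU_DYNTICKS_IDX CT_STATE_MAX		/* counter step: 4 */
	#define CT_STATE_MASK	 (CT_STATE_MAX - 1)	/* low bits: 0x3 */
	#define CT_DYNTICKS_MASK (~CT_STATE_MASK)

	int main(void)
	{
		/* e.g. three EQS transitions, currently in user context */
		int state = 3 * RCU_DYNTICKS_IDX + CT_STATE_USER;

		printf("ctx=%d dynticks=%d\n",
		       state & CT_STATE_MASK,		/* 2 == CT_STATE_USER */
		       state & CT_DYNTICKS_MASK);	/* 12 == 3 increments */
		return 0;
	}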