Merge: Update locking/futex to v6.12
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/7113
JIRA: https://issues.redhat.com/browse/RHEL-79880
Tested: selftests, locktorture
Signed-off-by: Čestmír Kalina <ckalina@redhat.com>
Approved-by: Waiman Long <longman@redhat.com>
Approved-by: Michael Petlan <mpetlan@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Jarod Wilson <jarod@redhat.com>
commit ff296bfccf
@@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
 static __always_inline unsigned \
 __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
 { \
-	unsigned seq = READ_ONCE(s->seqcount.sequence); \
+	unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
 \
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
 		return seq; \

@@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
 		 * Re-read the sequence counter since the (possibly \
 		 * preempted) writer made progress. \
 		 */ \
-		seq = READ_ONCE(s->seqcount.sequence); \
+		seq = smp_load_acquire(&s->seqcount.sequence); \
 	} \
 \
 	return seq; \

@@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
 
 static inline unsigned __seqprop_sequence(const seqcount_t *s)
 {
-	return READ_ONCE(s->sequence);
+	return smp_load_acquire(&s->sequence);
 }
 
 static inline bool __seqprop_preemptible(const seqcount_t *s)

@@ -263,17 +263,9 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
 #define seqprop_assert(s) __seqprop(s, assert)(s)
 
 /**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
  * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
  *
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
  * Return: count to be passed to read_seqcount_retry()
  */
 #define __read_seqcount_begin(s) \

@@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-#define raw_read_seqcount_begin(s) \
-({ \
-	unsigned _seq = __read_seqcount_begin(s); \
- \
-	smp_rmb(); \
-	_seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
 
 /**
  * read_seqcount_begin() - begin a seqcount_t read critical section

@@ -328,7 +314,6 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
 ({ \
 	unsigned __seq = seqprop_sequence(s); \
 \
-	smp_rmb(); \
 	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
 	__seq; \
 })
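The seqcount hunks above replace a plain READ_ONCE() of the sequence counter, which callers had to pair with an explicit smp_rmb(), with smp_load_acquire(): the acquire load by itself orders the counter read before the later data reads, so raw_read_seqcount_begin() collapses to __read_seqcount_begin() and the open-coded barrier disappears. A minimal sketch of the two equivalent forms, with illustrative names rather than the kernel's exact helpers:

/* Illustrative only: why the separate smp_rmb() can be dropped. */
static inline unsigned seq_begin_rmb(const unsigned *seq)
{
        unsigned s = READ_ONCE(*seq);   /* load the counter */
        smp_rmb();                      /* order it before later data loads */
        return s;
}

static inline unsigned seq_begin_acquire(const unsigned *seq)
{
        /* acquire semantics: later loads cannot move before this load */
        return smp_load_acquire(seq);
}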
@@ -576,8 +576,10 @@ static struct lock_trace *save_trace(void)
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		return NULL;
 	}
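Most of the lockdep hunks that follow apply one pattern: each diagnostic splat is bracketed by nbcon_cpu_emergency_enter()/nbcon_cpu_emergency_exit(), marking an emergency section so the nbcon console machinery prints the report with priority. A rough sketch of the shape these call sites take; the body is illustrative, not an actual lockdep report:

static void report_some_lockdep_problem(void)
{
        if (!debug_locks_off_graph_unlock())
                return;

        nbcon_cpu_emergency_enter();    /* open an emergency section */
        pr_warn("WARNING: <description of the problem>\n");
        dump_stack();
        nbcon_cpu_emergency_exit();     /* close it before returning */
}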
@@ -788,7 +790,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
 	else
 		printk("%d lock%s held by %s/%d:\n", depth,
-		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+		       str_plural(depth), p->comm, task_pid_nr(p));
 	/*
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
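str_plural() is the generic string helper that replaces the open-coded ternary above; roughly (the exact definition lives in the kernel's string helpers):

static inline const char *str_plural(size_t num)
{
        return num == 1 ? "" : "s";     /* "1 lock held" vs. "2 locks held" */
}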
@@ -890,11 +892,13 @@ look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		instrumentation_begin();
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		printk(KERN_ERR
 		       "BUG: looking up invalid subclass: %u\n", subclass);
 		printk(KERN_ERR
 		       "turning off the locking correctness validator.\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		instrumentation_end();
 		return NULL;
 	}
@@ -971,11 +975,13 @@ static bool assign_lock_key(struct lockdep_map *lock)
 	else {
 		/* Debug-check: all keys must be persistent! */
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		pr_err("INFO: trying to register non-static key.\n");
 		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
 		pr_err("you didn't initialize this object before use?\n");
 		pr_err("turning off the locking correctness validator.\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return false;
 	}
 
@@ -1319,8 +1325,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 			return NULL;
 		}
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return NULL;
 	}
 	nr_lock_classes++;
@@ -1352,11 +1360,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	if (verbose(class)) {
 		graph_unlock();
 
+		nbcon_cpu_emergency_enter();
 		printk("\nnew class %px: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk(KERN_CONT "#%d", class->name_version);
 		printk(KERN_CONT "\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		if (!graph_lock()) {
 			return NULL;
@@ -1395,8 +1405,10 @@ static struct lock_list *alloc_list_entry(void)
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return NULL;
 	}
 	nr_list_entries++;
@@ -2042,6 +2054,8 @@ static noinline void print_circular_bug(struct lock_list *this,
 
 	depth = get_lock_depth(target);
 
+	nbcon_cpu_emergency_enter();
+
 	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);

@@ -2060,6 +2074,8 @@ static noinline void print_circular_bug(struct lock_list *this,
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static noinline void print_bfs_bug(int ret)
@@ -2070,6 +2086,9 @@ static noinline void print_bfs_bug(int ret)
 	/*
 	 * Breadth-first-search failed, graph got corrupted?
 	 */
+	if (ret == BFS_EQUEUEFULL)
+		pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
+
 	WARN(1, "lockdep bfs error:%d\n", ret);
 }
 
@@ -2572,6 +2591,8 @@ print_bad_irq_dependency(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
@@ -2621,11 +2642,13 @@ print_bad_irq_dependency(struct task_struct *curr,
 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	next_root->trace = save_trace();
 	if (!next_root->trace)
-		return;
+		goto out;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+out:
+	nbcon_cpu_emergency_exit();
 }
 
 static const char *state_names[] = {
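Report functions that can bail out between the enter and exit calls have their early returns converted to goto out, so the emergency section is closed on every path. A minimal sketch of that discipline; the helper names are illustrative:

static void print_some_splat(void)
{
        nbcon_cpu_emergency_enter();

        pr_warn("WARNING: something looks wrong\n");
        if (!save_trace())
                goto out;       /* a bare "return" would leave the section open */
        dump_stack();
out:
        nbcon_cpu_emergency_exit();
}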
@@ -2990,6 +3013,8 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");

@@ -3012,6 +3037,8 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 /*
@@ -3609,6 +3636,8 @@ static void print_collision(struct task_struct *curr,
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");

@@ -3625,6 +3654,8 @@ static void print_collision(struct task_struct *curr,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 #endif
 
@@ -3715,8 +3746,10 @@ static inline int add_chain_cache(struct task_struct *curr,
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return 0;
 	}
 	chain->chain_key = chain_key;

@@ -3733,8 +3766,10 @@ static inline int add_chain_cache(struct task_struct *curr,
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 		return 0;
 	}
 
@@ -4039,6 +4074,8 @@ print_irq_inversion_bug(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");

@@ -4079,11 +4116,13 @@ print_irq_inversion_bug(struct task_struct *curr,
 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	root->trace = save_trace();
 	if (!root->trace)
-		return;
+		goto out;
 	print_shortest_lock_dependencies(other, root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+out:
+	nbcon_cpu_emergency_exit();
 }
 
 /*
@@ -4160,6 +4199,8 @@ void print_irqtrace_events(struct task_struct *curr)
 {
 	const struct irqtrace_events *trace = &curr->irqtrace;
 
+	nbcon_cpu_emergency_enter();
+
 	printk("irq event stamp: %u\n", trace->irq_events);
 	printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
 		trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,

@@ -4173,6 +4214,8 @@ void print_irqtrace_events(struct task_struct *curr)
 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
 		trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
 		(void *)trace->softirq_disable_ip);
+
+	nbcon_cpu_emergency_exit();
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -4693,10 +4736,12 @@ unlock:
 	 * We must printk outside of the graph_lock:
 	 */
 	if (ret == 2) {
+		nbcon_cpu_emergency_enter();
 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
 		print_lock(this);
 		print_irqtrace_events(curr);
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 	}
 
 	return ret;
@@ -4737,6 +4782,8 @@ print_lock_invalid_wait_context(struct task_struct *curr,
 	if (debug_locks_silent)
 		return 0;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("[ BUG: Invalid wait context ]\n");

@@ -4756,6 +4803,8 @@ print_lock_invalid_wait_context(struct task_struct *curr,
 	pr_warn("stack backtrace:\n");
 	dump_stack();
 
+	nbcon_cpu_emergency_exit();
+
 	return 0;
 }
 
@@ -4963,6 +5012,8 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");

@@ -4983,6 +5034,8 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -5031,11 +5084,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	debug_class_ops_inc(class);
 
 	if (very_verbose(class)) {
+		nbcon_cpu_emergency_enter();
 		printk("\nacquire class [%px] %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk(KERN_CONT "#%d", class->name_version);
 		printk(KERN_CONT "\n");
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 	}
 
 	/*
@@ -5162,6 +5217,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 #endif
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
 		debug_locks_off();
+		nbcon_cpu_emergency_enter();
 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
 		printk(KERN_DEBUG "depth: %i max: %lu!\n",
 		       curr->lockdep_depth, MAX_LOCK_DEPTH);

@@ -5169,6 +5225,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		lockdep_print_held_locks(current);
 		debug_show_all_locks();
 		dump_stack();
+		nbcon_cpu_emergency_exit();
 
 		return 0;
 	}
@@ -5188,6 +5245,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");

@@ -5204,6 +5263,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static noinstr int match_held_lock(const struct held_lock *hlock,
@@ -5916,6 +5977,8 @@ static void print_lock_contention_bug(struct task_struct *curr,
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");

@@ -5932,6 +5995,8 @@ static void print_lock_contention_bug(struct task_struct *curr,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static void
@@ -6211,25 +6276,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback, must called with
+ * the lockdep lock held, returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return;
+		return false;
 
 	if (delayed_free.scheduled)
-		return;
+		return false;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6255,6 +6322,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;

@@ -6266,14 +6334,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
-
-	/*
-	 * If there's anything on the open list, close and start a new callback.
-	 */
-	call_rcu_zapped(delayed_free.pf + delayed_free.index);
-
+	need_callback =
+		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * If there's pending free and its callback has not been scheduled,
+	 * queue an RCU callback.
+	 */
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 }
 
 /*
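The call_rcu_zapped() -> prepare_call_rcu_zapped() conversion splits the work so that call_rcu() is no longer issued under the raw lockdep lock with interrupts disabled: the helper only updates the delayed_free bookkeeping and reports whether a callback is needed, and each caller queues it after dropping the lock. Condensed caller-side pattern, restating the hunks above and below (locals as declared in the surrounding functions):

raw_local_irq_save(flags);
lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
need_callback = prepare_call_rcu_zapped(pf);    /* no call_rcu() under the lock */
lockdep_unlock();
raw_local_irq_restore(flags);

if (need_callback)
        call_rcu(&delayed_free.rcu_head, free_zapped_rcu);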
@@ -6313,6 +6385,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
+	bool need_callback;
 
 	init_data_structures_once();
 

@@ -6320,10 +6393,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6417,6 +6491,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
+	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();

@@ -6425,11 +6500,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	call_rcu_zapped(pf);
+	need_callback = prepare_call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6473,6 +6550,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
+	bool need_callback = false;
 
 	might_sleep();
 

@@ -6493,11 +6571,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		call_rcu_zapped(pf);
+		need_callback = prepare_call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
+	if (need_callback)
+		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
@@ -6551,6 +6632,8 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");

@@ -6563,6 +6646,8 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
@@ -6609,6 +6694,8 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
+	nbcon_cpu_emergency_enter();
+
 	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",

@@ -6618,6 +6705,8 @@ static void print_held_locks_bug(void)
 	lockdep_print_held_locks(current);
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+
+	nbcon_cpu_emergency_exit();
 }
 
 void debug_check_no_locks_held(void)
@@ -6675,6 +6764,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
+		nbcon_cpu_emergency_enter();
 		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");

@@ -6683,6 +6773,7 @@ asmlinkage __visible void lockdep_sys_exit(void)
 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
+		nbcon_cpu_emergency_exit();
 	}
 
 	/*
@@ -6699,6 +6790,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	bool rcu = warn_rcu_enter();
 
 	/* Note: the following can be executed concurrently, so be careful. */
+	nbcon_cpu_emergency_enter();
 	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");

@@ -6737,6 +6829,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	lockdep_print_held_locks(curr);
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
+	nbcon_cpu_emergency_exit();
 	warn_rcu_exit(rcu);
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
@@ -181,12 +181,21 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 	__rwsem_set_reader_owned(sem, current);
 }
 
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * Return just the real task structure pointer of the owner
+ */
+static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
+{
+	return (struct task_struct *)
+		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
+}
+
 /*
  * Return true if the rwsem is owned by a reader.
  */
 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
 {
-#ifdef CONFIG_DEBUG_RWSEMS
 	/*
 	 * Check the count to see if it is write-locked.
 	 */

@@ -194,11 +203,9 @@ static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
 
 	if (count & RWSEM_WRITER_MASK)
 		return false;
-#endif
 	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
 }
 
-#ifdef CONFIG_DEBUG_RWSEMS
 /*
  * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
  * is a task pointer in owner of a reader-owned rwsem, it will be the

@@ -265,15 +272,6 @@ static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 	return false;
 }
 
-/*
- * Return just the real task structure pointer of the owner
- */
-static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
-{
-	return (struct task_struct *)
-		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
-}
-
 /*
  * Return the real task structure pointer of the owner and the embedded
  * flags in the owner. pflags must be non-NULL.
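The rwsem hunks are pure code movement: rwsem_owner() now sits next to is_rwsem_reader_owned() inside a single #ifdef CONFIG_DEBUG_RWSEMS block, apparently because the debug checks are its only remaining users. The owner word packs flag bits into the low bits of the task_struct pointer, which is why the mask is stripped on read; a small sketch of that encoding, with illustrative flag values (see the rwsem code for the real definitions):

#define RWSEM_READER_OWNED      (1UL << 0)      /* illustrative value */
#define RWSEM_NONSPINNABLE      (1UL << 1)      /* illustrative value */
#define RWSEM_OWNER_FLAGS_MASK  (RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)

/* owner = task pointer | flag bits; masking the flags recovers the pointer */
static inline struct task_struct *owner_task(atomic_long_t *owner)
{
        return (struct task_struct *)
                (atomic_long_read(owner) & ~RWSEM_OWNER_FLAGS_MASK);
}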
@@ -697,3 +697,4 @@ module_exit(test_ww_mutex_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("API test facility for ww_mutexes");