- Make sure sanity checks down in the mutex lock path happen on the correct
  type of task so that they don't trigger falsely

- Use the write unsafe user access pairs when writing a futex value to
  prevent an error on PowerPC which does user read and write accesses
  differently
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmihlaAACgkQEsHwGGHe
VUrV7g/+Kx4n1TQuGnk4kd5h5q0uls8mgeFddYjv6BgVxcaWq7Tzv7XMJ5hvWEqp
P/+Zt43Sv9sd7i+PhoFD2Lr+EDYx8c0Lp08/LH0zsgKIA2Ai8ntJHJcb3se3Kxr5
yV23d0tkrCijcB58OL1xncm96Lp3XoXyTz8b0ahKDNG9mS8F/9XK9GgmG9OqKXDg
7T8Vx5NKt0YrvAWwvsQQlTUTcQ4a4O/UMwJmgEbvqHn0WwQISRxx/TE6wYuIwWAj
pbrN5kzDsZ6tA07h48NWnkFOEeqsQgbbKDkWvYYRYBrVzEATQBpBWfSQ0HsaqPmc
1Mk5Zs+J5UFhHx7Yw348JVqw5Fl4VDT4Oi4AoIzjBym3c73nrNfZzESRsf4dES5Q
DBsgTb0tjEZcR7MrWWErYu1LXw1qP5Ib39qLDVIvQQ4HomctSUuXVTIRL9qJvaCK
aCPt2Ivkhj3wItZSeTfzLTXbWE9lP4AhuBpJ4ALHRbOaCRLNfK9ZzLfOUyKePUvx
s3j7iPubfS5/lw192z5weLzEE4e8E7wIxSkIQNKLQFI/kr5YwKfwEO5Zm+UMfH5j
m+Hl7YKS0nT2IlFbRel2cSkw4MDaEJjgahMzbp+D0p+xV2H4KjY4nLoarwsuoP8D
GxLAOmRW1nzqj3QHIWsBF9iBxkO89lshWOgGxhUbhtywNqxSV6M=
=q1tX
-----END PGP SIGNATURE-----

Merge tag 'locking_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Make sure sanity checks down in the mutex lock path happen on the
   correct type of task so that they don't trigger falsely

 - Use the write unsafe user access pairs when writing a futex value to
   prevent an error on PowerPC which does user read and write accesses
   differently

* tag 'locking_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking: Fix __clear_task_blocked_on() warning from __ww_mutex_wound() path
  futex: Use user_write_access_begin/_end() in futex_put_value()
commit 0a9ee9ce49
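The first fix in the pull message concerns the mutex code's blocked_on bookkeeping: a task records which mutex it is currently blocked on, and the clear-side sanity check warns if the caller names a lock other than the recorded one. Below is a minimal standalone userspace model of that invariant, a sketch only: the names (mock_mutex, mock_task, set_blocked_on, clear_blocked_on) and the assert()-based checks are invented for illustration and are not kernel code. It shows why clearing another task's record against a specific lock can warn falsely when that task is blocked on a different lock, and why a NULL argument that skips the cross-check avoids the false trigger. The actual hunks follow.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct mock_mutex { const char *name; };
struct mock_task  { struct mock_mutex *blocked_on; };

/* A task may announce the same lock again, but never a different one. */
static void set_blocked_on(struct mock_task *t, struct mock_mutex *m)
{
        assert(m != NULL);
        assert(t->blocked_on == NULL || t->blocked_on == m);
        t->blocked_on = m;
}

/* Passing a lock means "cross-check it"; passing NULL means "just clear". */
static void clear_blocked_on(struct mock_task *t, struct mock_mutex *m)
{
        if (m)
                assert(t->blocked_on == NULL || t->blocked_on == m);
        t->blocked_on = NULL;
}

int main(void)
{
        struct mock_mutex a = { "A" }, b = { "B" };
        struct mock_task owner = { NULL };

        set_blocked_on(&owner, &b);     /* the owner of A is itself waiting on B */

        /*
         * A waiter on mutex A wounds A's owner.  Clearing with &a would trip
         * the cross-check (the owner is blocked on B, not A) -- that is the
         * false warning the fix removes.  Clearing with NULL just drops the
         * record.
         */
        clear_blocked_on(&owner, NULL);
        printf("owner blocked_on cleared: %s\n", owner.blocked_on ? "no" : "yes");
        (void)a;
        return 0;
}

The kernel helpers in the hunks below have the same shape, with WARN_ON_ONCE() in place of assert() and READ_ONCE()/WRITE_ONCE() around the blocked_on pointer.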
@@ -2152,6 +2152,8 @@ static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
 
 static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
 {
+        struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
         WARN_ON_ONCE(!m);
         /* The task should only be setting itself as blocked */
         WARN_ON_ONCE(p != current);
@@ -2162,8 +2164,8 @@ static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
          * with a different mutex. Note, setting it to the same
          * lock repeatedly is ok.
          */
-        WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
-        p->blocked_on = m;
+        WARN_ON_ONCE(blocked_on && blocked_on != m);
+        WRITE_ONCE(p->blocked_on, m);
 }
 
 static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
@@ -2174,16 +2176,19 @@ static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
 
 static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
 {
-        WARN_ON_ONCE(!m);
-        /* Currently we serialize blocked_on under the mutex::wait_lock */
-        lockdep_assert_held_once(&m->wait_lock);
-        /*
-         * There may be cases where we re-clear already cleared
-         * blocked_on relationships, but make sure we are not
-         * clearing the relationship with a different lock.
-         */
-        WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);
-        p->blocked_on = NULL;
+        if (m) {
+                struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+                /* Currently we serialize blocked_on under the mutex::wait_lock */
+                lockdep_assert_held_once(&m->wait_lock);
+                /*
+                 * There may be cases where we re-clear already cleared
+                 * blocked_on relationships, but make sure we are not
+                 * clearing the relationship with a different lock.
+                 */
+                WARN_ON_ONCE(blocked_on && blocked_on != m);
+        }
+        WRITE_ONCE(p->blocked_on, NULL);
 }
 
 static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
@@ -319,13 +319,13 @@ static __always_inline int futex_put_value(u32 val, u32 __user *to)
 {
         if (can_do_masked_user_access())
                 to = masked_user_access_begin(to);
-        else if (!user_read_access_begin(to, sizeof(*to)))
+        else if (!user_write_access_begin(to, sizeof(*to)))
                 return -EFAULT;
         unsafe_put_user(val, to, Efault);
-        user_read_access_end();
+        user_write_access_end();
         return 0;
 Efault:
-        user_read_access_end();
+        user_write_access_end();
         return -EFAULT;
 }
 
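The hunk above (futex_put_value(), presumably in kernel/futex/futex.h) pairs the unsafe_put_user() store with the write flavour of the user-access window. On many architectures user_read_access_begin() and user_write_access_begin() both map to the same user_access_begin(), but PowerPC with KUAP can open read and write access to user memory separately, so a store performed inside a read-only window faults. A minimal sketch of the same pattern applied to a hypothetical 64-bit helper follows; the helper name and type are invented for illustration, while the primitives are the ones used in the hunk.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Hypothetical helper, illustration only: the access window must match the
 * direction of the access, i.e. a write window around unsafe_put_user().
 */
static __always_inline int put_user_u64_sketch(u64 val, u64 __user *to)
{
        if (can_do_masked_user_access())
                to = masked_user_access_begin(to);
        else if (!user_write_access_begin(to, sizeof(*to)))
                return -EFAULT;

        unsafe_put_user(val, to, Efault);       /* jumps to Efault on a fault */

        user_write_access_end();                /* must match the _begin() flavour */
        return 0;
Efault:
        user_write_access_end();
        return -EFAULT;
}

The symmetric rule applies to loads: an unsafe_get_user() belongs inside a user_read_access_begin()/user_read_access_end() pair.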
@@ -342,8 +342,12 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
                  * When waking up the task to wound, be sure to clear the
                  * blocked_on pointer. Otherwise we can see circular
                  * blocked_on relationships that can't resolve.
+                 *
+                 * NOTE: We pass NULL here instead of lock, because we
+                 * are waking the mutex owner, who may be currently
+                 * blocked on a different mutex.
                  */
-                __clear_task_blocked_on(owner, lock);
+                __clear_task_blocked_on(owner, NULL);
                 wake_q_add(wake_q, owner);
         }
         return true;
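To restate the reasoning behind the last hunk (a paraphrase of the reworked blocked_on helpers shown earlier, not additional kernel documentation): __ww_mutex_wound() is waking the lock's owner, and that owner may itself be blocked on an unrelated mutex, so the cross-checking form of the clear helper would warn falsely. A comment-only summary of the two call shapes:

/*
 * __clear_task_blocked_on(p, m)    - warn if p is recorded as blocked on a
 *                                    lock other than m, then clear the record
 * __clear_task_blocked_on(p, NULL) - skip the cross-check and just clear;
 *                                    used when the caller (here the wound
 *                                    path) cannot know what p is blocked on
 */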