Merge: io_uring: drop any code related to SCM_RIGHTS
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4245

Pull in patches related to the removal of io_uring fd passing.

JIRA: https://issues.redhat.com/browse/RHEL-36366
CVE: CVE-2023-52656

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Approved-by: Brian Foster <bfoster@redhat.com>
Approved-by: Xin Long <lxin@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Lucas Zampieri <lzampier@redhat.com>
commit 28d14c1d4e
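For context, SCM_RIGHTS is the SOL_SOCKET control-message type used to pass open file descriptors between processes over an AF_UNIX socket; the patches pulled in here remove io_uring's kernel-side bookkeeping for that mechanism. A minimal userspace sketch of such a transfer follows (illustrative only, not part of this merge; the helper name send_fd and the one-byte payload are made up):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send one file descriptor over an already-connected AF_UNIX socket. */
static int send_fd(int sock, int fd)
{
        char dummy = '*';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;           /* ensures cmsg alignment */
        } u = { .buf = { 0 } };
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;           /* payload is an array of fds */
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}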
@@ -46,7 +46,6 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
                              struct iov_iter *iter, void *ioucmd);
 void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
                        unsigned issue_flags);
-struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
 void io_uring_unreg_ringfd(void);
@@ -82,6 +81,7 @@ static inline void io_uring_free(struct task_struct *tsk)
                __io_uring_free(tsk);
 }
 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+bool io_is_uring_fops(struct file *file);
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
                              struct iov_iter *iter, void *ioucmd)
@@ -100,10 +100,6 @@ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
                        void (*task_work_cb)(struct io_uring_cmd *, unsigned))
 {
 }
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
-        return NULL;
-}
 static inline void io_uring_task_cancel(void)
 {
 }
@@ -122,6 +118,10 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
 {
         return -EOPNOTSUPP;
 }
+static inline bool io_is_uring_fops(struct file *file)
+{
+        return false;
+}
 #endif

 #endif
@@ -345,9 +345,6 @@ struct io_ring_ctx {
         struct wait_queue_head          rsrc_quiesce_wq;
         unsigned                        rsrc_quiesce;

-#if defined(CONFIG_UNIX)
-        struct socket                   *ring_sock;
-#endif
         /* hashed buffered write serialization */
         struct io_wq_hash               *hash_map;

@@ -87,13 +87,10 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
                 io_file_bitmap_clear(&ctx->file_table, slot_index);
         }

-        ret = io_scm_file_account(ctx, file);
-        if (!ret) {
-                *io_get_tag_slot(ctx->file_data, slot_index) = 0;
-                io_fixed_file_set(file_slot, file);
-                io_file_bitmap_set(&ctx->file_table, slot_index);
-        }
-        return ret;
+        *io_get_tag_slot(ctx->file_data, slot_index) = 0;
+        io_fixed_file_set(file_slot, file);
+        io_file_bitmap_set(&ctx->file_table, slot_index);
+        return 0;
 }

 int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
@@ -60,7 +60,6 @@
 #include <linux/net.h>
 #include <net/sock.h>
 #include <net/af_unix.h>
-#include <net/scm.h>
 #include <linux/anon_inodes.h>
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
@@ -176,19 +175,6 @@ static struct ctl_table kernel_io_uring_disabled_table[] = {
 };
 #endif

-struct sock *io_uring_get_socket(struct file *file)
-{
-#if defined(CONFIG_UNIX)
-        if (io_is_uring_fops(file)) {
-                struct io_ring_ctx *ctx = file->private_data;
-
-                return ctx->ring_sock->sk;
-        }
-#endif
-        return NULL;
-}
-EXPORT_SYMBOL(io_uring_get_socket);
-
 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 {
         if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
@@ -2940,13 +2926,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
                 io_rsrc_node_destroy(ctx, ctx->rsrc_node);

         WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
-
-#if defined(CONFIG_UNIX)
-        if (ctx->ring_sock) {
-                ctx->ring_sock->file = NULL; /* so that iput() is called */
-                sock_release(ctx->ring_sock);
-        }
-#endif
         WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));

         io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
@@ -3834,33 +3813,13 @@ static int io_uring_install_fd(struct file *file)
 /*
  * Allocate an anonymous fd, this is what constitutes the application
  * visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
+ * fd to gain access to the SQ/CQ ring details.
  */
 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
 {
-        struct file *file;
-#if defined(CONFIG_UNIX)
-        int ret;
-
-        ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
-                               &ctx->ring_sock);
-        if (ret)
-                return ERR_PTR(ret);
-#endif
-
         /* Create a new inode so that the LSM can block the creation. */
-        file = anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
+        return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
                                          O_RDWR | O_CLOEXEC, NULL);
-#if defined(CONFIG_UNIX)
-        if (IS_ERR(file)) {
-                sock_release(ctx->ring_sock);
-                ctx->ring_sock = NULL;
-        } else {
-                ctx->ring_sock->file = file;
-        }
-#endif
-        return file;
 }

 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
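Read contiguously, the helper after this change (assembled from the context and '+' lines of the hunk above) reduces to:

static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
        /* Create a new inode so that the LSM can block the creation. */
        return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
                                         O_RDWR | O_CLOEXEC, NULL);
}

With no ring socket to create, there is also nothing to tear down on the error path, so the CONFIG_UNIX-only cleanup that used to follow the allocation disappears as well.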
@@ -54,7 +54,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-bool io_is_uring_fops(struct file *file);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
 void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
io_uring/rsrc.c
@@ -24,7 +24,6 @@ struct io_rsrc_update {
 };

 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
                                   struct io_mapped_ubuf **pimu,
                                   struct page **last_hpage);
@@ -157,7 +156,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node)

         switch (node->type) {
         case IORING_RSRC_FILE:
-                io_rsrc_file_put(node->ctx, prsrc);
+                fput(prsrc->file);
                 break;
         case IORING_RSRC_BUFFER:
                 io_rsrc_buf_put(node->ctx, prsrc);
@@ -402,23 +401,13 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 break;
                         }
                         /*
-                         * Don't allow io_uring instances to be registered. If
-                         * UNIX isn't enabled, then this causes a reference
-                         * cycle and this instance can never get freed. If UNIX
-                         * is enabled we'll handle it just fine, but there's
-                         * still no point in allowing a ring fd as it doesn't
-                         * support regular read/write anyway.
+                         * Don't allow io_uring instances to be registered.
                          */
                         if (io_is_uring_fops(file)) {
                                 fput(file);
                                 err = -EBADF;
                                 break;
                         }
-                        err = io_scm_file_account(ctx, file);
-                        if (err) {
-                                fput(file);
-                                break;
-                        }
                         *io_get_tag_slot(data, i) = tag;
                         io_fixed_file_set(file_slot, file);
                         io_file_bitmap_set(&ctx->file_table, i);
@@ -675,22 +664,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
         for (i = 0; i < ctx->nr_user_files; i++) {
                 struct file *file = io_file_from_index(&ctx->file_table, i);

-                /* skip scm accounted files, they'll be freed by ->ring_sock */
-                if (!file || io_file_need_scm(file))
+                if (!file)
                         continue;
                 io_file_bitmap_clear(&ctx->file_table, i);
                 fput(file);
         }

-#if defined(CONFIG_UNIX)
-        if (ctx->ring_sock) {
-                struct sock *sock = ctx->ring_sock->sk;
-                struct sk_buff *skb;
-
-                while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
-                        kfree_skb(skb);
-        }
-#endif
         io_free_file_tables(&ctx->file_table);
         io_file_table_set_alloc_range(ctx, 0, 0);
         io_rsrc_data_free(ctx->file_data);
@@ -718,137 +697,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
         return ret;
 }

-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * loops in the file referencing. We account only files that can hold other
- * files because otherwise they can't form a loop and so are not interesting
- * for GC.
- */
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
-{
-#if defined(CONFIG_UNIX)
-        struct sock *sk = ctx->ring_sock->sk;
-        struct sk_buff_head *head = &sk->sk_receive_queue;
-        struct scm_fp_list *fpl;
-        struct sk_buff *skb;
-
-        if (likely(!io_file_need_scm(file)))
-                return 0;
-
-        /*
-         * See if we can merge this file into an existing skb SCM_RIGHTS
-         * file set. If there's no room, fall back to allocating a new skb
-         * and filling it in.
-         */
-        spin_lock_irq(&head->lock);
-        skb = skb_peek(head);
-        if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
-                __skb_unlink(skb, head);
-        else
-                skb = NULL;
-        spin_unlock_irq(&head->lock);
-
-        if (!skb) {
-                fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
-                if (!fpl)
-                        return -ENOMEM;
-
-                skb = alloc_skb(0, GFP_KERNEL);
-                if (!skb) {
-                        kfree(fpl);
-                        return -ENOMEM;
-                }
-
-                fpl->user = get_uid(current_user());
-                fpl->max = SCM_MAX_FD;
-                fpl->count = 0;
-
-                UNIXCB(skb).fp = fpl;
-                skb->sk = sk;
-                skb->destructor = io_uring_destruct_scm;
-                refcount_add(skb->truesize, &sk->sk_wmem_alloc);
-        }
-
-        fpl = UNIXCB(skb).fp;
-        fpl->fp[fpl->count++] = get_file(file);
-        unix_inflight(fpl->user, file);
-        skb_queue_head(head, skb);
-        fput(file);
-#endif
-        return 0;
-}
-
-static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
-{
-#if defined(CONFIG_UNIX)
-        struct sock *sock = ctx->ring_sock->sk;
-        struct sk_buff_head list, *head = &sock->sk_receive_queue;
-        struct sk_buff *skb;
-        int i;
-
-        __skb_queue_head_init(&list);
-
-        /*
-         * Find the skb that holds this file in its SCM_RIGHTS. When found,
-         * remove this entry and rearrange the file array.
-         */
-        skb = skb_dequeue(head);
-        while (skb) {
-                struct scm_fp_list *fp;
-
-                fp = UNIXCB(skb).fp;
-                for (i = 0; i < fp->count; i++) {
-                        int left;
-
-                        if (fp->fp[i] != file)
-                                continue;
-
-                        unix_notinflight(fp->user, fp->fp[i]);
-                        left = fp->count - 1 - i;
-                        if (left) {
-                                memmove(&fp->fp[i], &fp->fp[i + 1],
-                                        left * sizeof(struct file *));
-                        }
-                        fp->count--;
-                        if (!fp->count) {
-                                kfree_skb(skb);
-                                skb = NULL;
-                        } else {
-                                __skb_queue_tail(&list, skb);
-                        }
-                        fput(file);
-                        file = NULL;
-                        break;
-                }
-
-                if (!file)
-                        break;
-
-                __skb_queue_tail(&list, skb);
-
-                skb = skb_dequeue(head);
-        }
-
-        if (skb_peek(&list)) {
-                spin_lock_irq(&head->lock);
-                while ((skb = __skb_dequeue(&list)) != NULL)
-                        __skb_queue_tail(head, skb);
-                spin_unlock_irq(&head->lock);
-        }
-#endif
-}
-
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
-        struct file *file = prsrc->file;
-
-        if (likely(!io_file_need_scm(file)))
-                fput(file);
-        else
-                io_rsrc_file_scm_put(ctx, file);
-}
-
 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                           unsigned nr_args, u64 __user *tags)
 {
@@ -897,21 +745,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                         goto fail;

                 /*
-                 * Don't allow io_uring instances to be registered. If UNIX
-                 * isn't enabled, then this causes a reference cycle and this
-                 * instance can never get freed. If UNIX is enabled we'll
-                 * handle it just fine, but there's still no point in allowing
-                 * a ring fd as it doesn't support regular read/write anyway.
+                 * Don't allow io_uring instances to be registered.
                  */
                 if (io_is_uring_fops(file)) {
                         fput(file);
                         goto fail;
                 }
-                ret = io_scm_file_account(ctx, file);
-                if (ret) {
-                        fput(file);
-                        goto fail;
-                }
                 file_slot = io_fixed_file_slot(&ctx->file_table, i);
                 io_fixed_file_set(file_slot, file);
                 io_file_bitmap_set(&ctx->file_table, i);
@@ -75,21 +75,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx);
 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                           unsigned nr_args, u64 __user *tags);

-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
-
-static inline bool io_file_need_scm(struct file *filp)
-{
-        return false;
-}
-
-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
-                                      struct file *file)
-{
-        if (likely(!io_file_need_scm(file)))
-                return 0;
-        return __io_scm_file_account(ctx, file);
-}
-
 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
                              unsigned nr_args);
 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
@@ -105,7 +105,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                 if (fd < 0 || !(file = fget_raw(fd)))
                         return -EBADF;
                 /* don't allow io_uring files */
-                if (io_uring_get_socket(file)) {
+                if (io_is_uring_fops(file)) {
                         fput(file);
                         return -EINVAL;
                 }
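With the ring's embedded socket gone, scm_fp_copy() can no longer identify an io_uring fd by asking for its socket; the check now keys off the file's file_operations instead. A sketch of what io_is_uring_fops() amounts to on the io_uring side (shown for context, not part of this hunk; io_uring_fops is the ring's file_operations table):

bool io_is_uring_fops(struct file *file)
{
        /* an io_uring instance is recognized purely by its f_op table */
        return file->f_op == &io_uring_fops;
}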
@@ -34,10 +34,8 @@ struct sock *unix_get_socket(struct file *filp)
                 /* PF_UNIX ? */
                 if (s && sock->ops && sock->ops->family == PF_UNIX)
                         u_sock = s;
-        } else {
-                /* Could be an io_uring instance */
-                u_sock = io_uring_get_socket(filp);
         }
+
         return u_sock;
 }
 EXPORT_SYMBOL(unix_get_socket);