fs/nfs/io: make nfs_start_io_*() killable

[ Upstream commit 38a125b315 ]

This allows killing processes that wait for a lock when one process is
stuck waiting for the NFS server. This aims to complete the coverage of
NFS operations being killable, like nfs_direct_wait() does, for example.

Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Stable-dep-of: 9eb90f4354 ("NFS: Serialise O_DIRECT i/o and truncate()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent fd84053daf
commit 7f08d14103
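For context: "killable" here means the kernel's killable rwsem primitives
(down_read_killable()/down_write_killable()), which return -EINTR when a
fatal signal is delivered instead of sleeping uninterruptibly. Below is a
minimal sketch of that pattern; example_start_io() is an illustrative name,
not part of the NFS code, and the actual NFS changes are in the diff that
follows.

#include <linux/rwsem.h>
#include <linux/errno.h>

/* Sketch of the killable-lock pattern applied by this patch:
 * wait for the semaphore, but bail out with -EINTR if the task
 * receives a fatal signal (e.g. SIGKILL) while waiting.
 */
static int example_start_io(struct rw_semaphore *sem)
{
        int err;

        err = down_read_killable(sem);  /* 0 on success, -EINTR if killed */
        if (err)
                return err;             /* lock was NOT taken; propagate */

        /* ... section protected by the shared lock ... */

        up_read(sem);
        return 0;
}

Because callers now have to check the return value, the nfs_start_io_*()
prototypes gain an int return type and a __must_check annotation in the
diff below.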
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -472,8 +472,16 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
 	if (user_backed_iter(iter))
 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
 
-	if (!swap)
-		nfs_start_io_direct(inode);
+	if (!swap) {
+		result = nfs_start_io_direct(inode);
+		if (result) {
+			/* release the reference that would usually be
+			 * consumed by nfs_direct_read_schedule_iovec()
+			 */
+			nfs_direct_req_release(dreq);
+			goto out_release;
+		}
+	}
 
 	NFS_I(inode)->read_io += count;
 	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
@@ -1031,7 +1039,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
 							    FLUSH_STABLE);
 	} else {
-		nfs_start_io_direct(inode);
+		result = nfs_start_io_direct(inode);
+		if (result) {
+			/* release the reference that would usually be
+			 * consumed by nfs_direct_write_schedule_iovec()
+			 */
+			nfs_direct_req_release(dreq);
+			goto out_release;
+		}
 
 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
 							    FLUSH_COND_STABLE);
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -167,7 +167,10 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
 		iocb->ki_filp,
 		iov_iter_count(to), (unsigned long) iocb->ki_pos);
 
-	nfs_start_io_read(inode);
+	result = nfs_start_io_read(inode);
+	if (result)
+		return result;
+
 	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
 	if (!result) {
 		result = generic_file_read_iter(iocb, to);
@@ -188,7 +191,10 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
 
 	dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos);
 
-	nfs_start_io_read(inode);
+	result = nfs_start_io_read(inode);
+	if (result)
+		return result;
+
 	result = nfs_revalidate_mapping(inode, in->f_mapping);
 	if (!result) {
 		result = filemap_splice_read(in, ppos, pipe, len, flags);
@@ -669,7 +675,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	nfs_clear_invalid_mapping(file->f_mapping);
 
 	since = filemap_sample_wb_err(file->f_mapping);
-	nfs_start_io_write(inode);
+	error = nfs_start_io_write(inode);
+	if (error)
+		return error;
 	result = generic_write_checks(iocb, from);
 	if (result > 0)
 		result = generic_perform_write(iocb, from);
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -6,6 +6,7 @@
 #include "nfs4_fs.h"
 #include <linux/fs_context.h>
 #include <linux/security.h>
+#include <linux/compiler_attributes.h>
 #include <linux/crc32.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/nfs_page.h>
@@ -516,11 +517,11 @@ extern const struct netfs_request_ops nfs_netfs_ops;
 #endif
 
 /* io.c */
-extern void nfs_start_io_read(struct inode *inode);
+extern __must_check int nfs_start_io_read(struct inode *inode);
 extern void nfs_end_io_read(struct inode *inode);
-extern void nfs_start_io_write(struct inode *inode);
+extern __must_check int nfs_start_io_write(struct inode *inode);
 extern void nfs_end_io_write(struct inode *inode);
-extern void nfs_start_io_direct(struct inode *inode);
+extern __must_check int nfs_start_io_direct(struct inode *inode);
 extern void nfs_end_io_direct(struct inode *inode);
 
 static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -39,19 +39,28 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
  * Note that buffered writes and truncates both take a write lock on
  * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
  */
-void
+int
 nfs_start_io_read(struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int err;
+
 	/* Be an optimist! */
-	down_read(&inode->i_rwsem);
+	err = down_read_killable(&inode->i_rwsem);
+	if (err)
+		return err;
 	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
-		return;
+		return 0;
 	up_read(&inode->i_rwsem);
+
 	/* Slow path.... */
-	down_write(&inode->i_rwsem);
+	err = down_write_killable(&inode->i_rwsem);
+	if (err)
+		return err;
 	nfs_block_o_direct(nfsi, inode);
 	downgrade_write(&inode->i_rwsem);
+
+	return 0;
 }
 
 /**
@@ -74,11 +83,15 @@ nfs_end_io_read(struct inode *inode)
  * Declare that a buffered read operation is about to start, and ensure
  * that we block all direct I/O.
  */
-void
+int
 nfs_start_io_write(struct inode *inode)
 {
-	down_write(&inode->i_rwsem);
-	nfs_block_o_direct(NFS_I(inode), inode);
+	int err;
+
+	err = down_write_killable(&inode->i_rwsem);
+	if (!err)
+		nfs_block_o_direct(NFS_I(inode), inode);
+	return err;
 }
 
 /**
@@ -119,19 +132,28 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
  * Note that buffered writes and truncates both take a write lock on
  * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
  */
-void
+int
 nfs_start_io_direct(struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int err;
+
 	/* Be an optimist! */
-	down_read(&inode->i_rwsem);
+	err = down_read_killable(&inode->i_rwsem);
+	if (err)
+		return err;
 	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
-		return;
+		return 0;
 	up_read(&inode->i_rwsem);
+
 	/* Slow path.... */
-	down_write(&inode->i_rwsem);
+	err = down_write_killable(&inode->i_rwsem);
+	if (err)
+		return err;
 	nfs_block_buffered(nfsi, inode);
 	downgrade_write(&inode->i_rwsem);
+
+	return 0;
 }
 
 /**