Mirror of https://gitee.com/bianbu-linux/linux-6.6 (synced 2025-04-24 14:07:52 -04:00)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile two from Al Viro:

 - orangefs fix

 - series of fs/namei.c cleanups from me

 - VFS stuff coming from overlayfs tree

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  orangefs: Use RCU for destroy_inode
  vfs: use helper for calling f_op->fsync()
  mm: use helper for calling f_op->mmap()
  vfs: use helpers for calling f_op->{read,write}_iter()
  vfs: pass type instead of fn to do_{loop,iter}_readv_writev()
  vfs: extract common parts of {compat_,}do_readv_writev()
  vfs: wrap write f_ops with file_{start,end}_write()
  vfs: deny copy_file_range() for non regular files
  vfs: deny fallocate() on directory
  vfs: create vfs helper vfs_tmpfile()
  namei.c: split unlazy_walk()
  namei.c: fold the check for DCACHE_OP_REVALIDATE into d_revalidate()
  lookup_fast(): clean up the logics around the fallback to non-rcu mode
  namei: fold unlazy_link() into its sole caller
commit 94e877d0fb
15 changed files with 261 additions and 223 deletions
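Most of the mechanical churn in the diff below is the same substitution repeated across callers: direct file->f_op method calls become the new call_read_iter()/call_write_iter()/call_mmap()/call_fsync() wrappers that this pile adds to include/linux/fs.h (see the fs.h hunk near the end of the diff). A minimal sketch of the pattern follows; example_read() is a hypothetical caller invented here only to show the call-site change, it is not part of the commit:

/*
 * Sketch only, not from the commit.  call_read_iter() is the helper this
 * series adds to include/linux/fs.h; example_read() is a made-up caller.
 */
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t example_read(struct file *file, struct kiocb *kiocb,
			    struct iov_iter *iter)
{
	/* before this series: poke the method table directly */
	/* return file->f_op->read_iter(kiocb, iter); */

	/* after: go through the wrapper, giving the VFS a single choke
	 * point should it ever need to instrument or check these calls */
	return call_read_iter(file, kiocb, iter);
}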
@@ -501,9 +501,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 	cmd->iocb.ki_flags = IOCB_DIRECT;
 
 	if (rw == WRITE)
-		ret = file->f_op->write_iter(&cmd->iocb, &iter);
+		ret = call_write_iter(file, &cmd->iocb, &iter);
 	else
-		ret = file->f_op->read_iter(&cmd->iocb, &iter);
+		ret = call_read_iter(file, &cmd->iocb, &iter);
 
 	if (ret != -EIOCBQUEUED)
 		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
@@ -141,7 +141,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 	if (!obj->base.filp)
 		return -ENODEV;
 
-	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
+	ret = call_mmap(obj->base.filp, vma);
 	if (ret)
 		return ret;
 
@@ -288,7 +288,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 	if (!obj->filp)
 		return -ENODEV;
 
-	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	ret = call_mmap(obj->filp, vma);
 	if (ret)
 		return ret;
 
fs/aio.c | 4
@@ -1495,7 +1495,7 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
 		return ret;
 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret)
-		ret = aio_ret(req, file->f_op->read_iter(req, &iter));
+		ret = aio_ret(req, call_read_iter(file, req, &iter));
 	kfree(iovec);
 	return ret;
 }
@@ -1520,7 +1520,7 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
 	if (!ret) {
 		req->ki_flags |= IOCB_WRITE;
 		file_start_write(file);
-		ret = aio_ret(req, file->f_op->write_iter(req, &iter));
+		ret = aio_ret(req, call_write_iter(file, req, &iter));
 		/*
 		 * We release freeze protection in aio_complete(). Fool lockdep
 		 * by telling it the lock got released so that it doesn't
@@ -96,7 +96,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
 	cfi->cfi_mapcount++;
 	spin_unlock(&cii->c_lock);
 
-	return host_file->f_op->mmap(host_file, vma);
+	return call_mmap(host_file, vma);
 }
 
 int coda_open(struct inode *coda_inode, struct file *coda_file)
fs/namei.c | 251
@@ -672,52 +672,83 @@ static bool legitimize_links(struct nameidata *nd)
 /**
  * unlazy_walk - try to switch to ref-walk mode.
  * @nd: nameidata pathwalk data
- * @dentry: child of nd->path.dentry or NULL
- * @seq: seq number to check dentry against
  * Returns: 0 on success, -ECHILD on failure
  *
- * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
- * for ref-walk mode. @dentry must be a path found by a do_lookup call on
- * @nd or NULL. Must be called from rcu-walk context.
+ * unlazy_walk attempts to legitimize the current nd->path and nd->root
+ * for ref-walk mode.
+ * Must be called from rcu-walk context.
  * Nothing should touch nameidata between unlazy_walk() failure and
  * terminate_walk().
  */
-static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq)
+static int unlazy_walk(struct nameidata *nd)
 {
 	struct dentry *parent = nd->path.dentry;
 
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 
+	nd->flags &= ~LOOKUP_RCU;
+	if (unlikely(!legitimize_links(nd)))
+		goto out2;
+	if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
+		goto out1;
+	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+		if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq)))
+			goto out;
+	}
+	rcu_read_unlock();
+	BUG_ON(nd->inode != parent->d_inode);
+	return 0;
+
+out2:
+	nd->path.mnt = NULL;
+	nd->path.dentry = NULL;
+out1:
+	if (!(nd->flags & LOOKUP_ROOT))
+		nd->root.mnt = NULL;
+out:
+	rcu_read_unlock();
+	return -ECHILD;
+}
+
+/**
+ * unlazy_child - try to switch to ref-walk mode.
+ * @nd: nameidata pathwalk data
+ * @dentry: child of nd->path.dentry
+ * @seq: seq number to check dentry against
+ * Returns: 0 on success, -ECHILD on failure
+ *
+ * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry
+ * for ref-walk mode. @dentry must be a path found by a do_lookup call on
+ * @nd. Must be called from rcu-walk context.
+ * Nothing should touch nameidata between unlazy_child() failure and
+ * terminate_walk().
+ */
+static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
+{
+	BUG_ON(!(nd->flags & LOOKUP_RCU));
+
 	nd->flags &= ~LOOKUP_RCU;
 	if (unlikely(!legitimize_links(nd)))
 		goto out2;
 	if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
 		goto out2;
-	if (unlikely(!lockref_get_not_dead(&parent->d_lockref)))
+	if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
 		goto out1;
 
 	/*
-	 * For a negative lookup, the lookup sequence point is the parents
-	 * sequence point, and it only needs to revalidate the parent dentry.
-	 *
-	 * For a positive lookup, we need to move both the parent and the
-	 * dentry from the RCU domain to be properly refcounted. And the
-	 * sequence number in the dentry validates *both* dentry counters,
-	 * since we checked the sequence number of the parent after we got
-	 * the child sequence number. So we know the parent must still
-	 * be valid if the child sequence number is still valid.
+	 * We need to move both the parent and the dentry from the RCU domain
+	 * to be properly refcounted. And the sequence number in the dentry
+	 * validates *both* dentry counters, since we checked the sequence
+	 * number of the parent after we got the child sequence number. So we
+	 * know the parent must still be valid if the child sequence number is
+	 * still valid.
 	 */
-	if (!dentry) {
-		if (read_seqcount_retry(&parent->d_seq, nd->seq))
-			goto out;
-		BUG_ON(nd->inode != parent->d_inode);
-	} else {
-		if (!lockref_get_not_dead(&dentry->d_lockref))
-			goto out;
-		if (read_seqcount_retry(&dentry->d_seq, seq))
-			goto drop_dentry;
+	if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
+		goto out;
+	if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) {
+		rcu_read_unlock();
+		dput(dentry);
+		goto drop_root_mnt;
 	}
 
 	/*
 	 * Sequence counts matched. Now make sure that the root is
 	 * still valid and get it if required.
@@ -733,10 +764,6 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq
 	rcu_read_unlock();
 	return 0;
 
-drop_dentry:
-	rcu_read_unlock();
-	dput(dentry);
-	goto drop_root_mnt;
 out2:
 	nd->path.mnt = NULL;
 out1:
@@ -749,27 +776,12 @@ drop_root_mnt:
 	return -ECHILD;
 }
 
-static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq)
-{
-	if (unlikely(!legitimize_path(nd, link, seq))) {
-		drop_links(nd);
-		nd->depth = 0;
-		nd->flags &= ~LOOKUP_RCU;
-		nd->path.mnt = NULL;
-		nd->path.dentry = NULL;
-		if (!(nd->flags & LOOKUP_ROOT))
-			nd->root.mnt = NULL;
-		rcu_read_unlock();
-	} else if (likely(unlazy_walk(nd, NULL, 0)) == 0) {
-		return 0;
-	}
-	path_put(link);
-	return -ECHILD;
-}
-
 static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-	return dentry->d_op->d_revalidate(dentry, flags);
+	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
+		return dentry->d_op->d_revalidate(dentry, flags);
+	else
+		return 1;
 }
 
 /**
@@ -790,7 +802,7 @@ static int complete_walk(struct nameidata *nd)
 	if (nd->flags & LOOKUP_RCU) {
 		if (!(nd->flags & LOOKUP_ROOT))
 			nd->root.mnt = NULL;
-		if (unlikely(unlazy_walk(nd, NULL, 0)))
+		if (unlikely(unlazy_walk(nd)))
 			return -ECHILD;
 	}
 
@@ -1016,7 +1028,7 @@ const char *get_link(struct nameidata *nd)
 		touch_atime(&last->link);
 		cond_resched();
 	} else if (atime_needs_update_rcu(&last->link, inode)) {
-		if (unlikely(unlazy_walk(nd, NULL, 0)))
+		if (unlikely(unlazy_walk(nd)))
 			return ERR_PTR(-ECHILD);
 		touch_atime(&last->link);
 	}
@@ -1035,7 +1047,7 @@ const char *get_link(struct nameidata *nd)
 	if (nd->flags & LOOKUP_RCU) {
 		res = get(NULL, inode, &last->done);
 		if (res == ERR_PTR(-ECHILD)) {
-			if (unlikely(unlazy_walk(nd, NULL, 0)))
+			if (unlikely(unlazy_walk(nd)))
 				return ERR_PTR(-ECHILD);
 			res = get(dentry, inode, &last->done);
 		}
@@ -1469,19 +1481,14 @@ static struct dentry *lookup_dcache(const struct qstr *name,
 				    struct dentry *dir,
 				    unsigned int flags)
 {
-	struct dentry *dentry;
-	int error;
-
-	dentry = d_lookup(dir, name);
+	struct dentry *dentry = d_lookup(dir, name);
 	if (dentry) {
-		if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
-			error = d_revalidate(dentry, flags);
-			if (unlikely(error <= 0)) {
-				if (!error)
-					d_invalidate(dentry);
-				dput(dentry);
-				return ERR_PTR(error);
-			}
+		int error = d_revalidate(dentry, flags);
+		if (unlikely(error <= 0)) {
+			if (!error)
+				d_invalidate(dentry);
+			dput(dentry);
+			return ERR_PTR(error);
 		}
 	}
 	return dentry;
@@ -1546,7 +1553,7 @@ static int lookup_fast(struct nameidata *nd,
 		bool negative;
 		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
 		if (unlikely(!dentry)) {
-			if (unlazy_walk(nd, NULL, 0))
+			if (unlazy_walk(nd))
 				return -ECHILD;
 			return 0;
 		}
@@ -1571,14 +1578,8 @@ static int lookup_fast(struct nameidata *nd,
 			return -ECHILD;
 
 		*seqp = seq;
-		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
-			status = d_revalidate(dentry, nd->flags);
-		if (unlikely(status <= 0)) {
-			if (unlazy_walk(nd, dentry, seq))
-				return -ECHILD;
-			if (status == -ECHILD)
-				status = d_revalidate(dentry, nd->flags);
-		} else {
+		status = d_revalidate(dentry, nd->flags);
+		if (likely(status > 0)) {
 			/*
 			 * Note: do negative dentry check after revalidation in
 			 * case that drops it.
@@ -1589,15 +1590,17 @@ static int lookup_fast(struct nameidata *nd,
 			path->dentry = dentry;
 			if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
 				return 1;
-			if (unlazy_walk(nd, dentry, seq))
-				return -ECHILD;
 		}
+		if (unlazy_child(nd, dentry, seq))
+			return -ECHILD;
+		if (unlikely(status == -ECHILD))
+			/* we'd been told to redo it in non-rcu mode */
+			status = d_revalidate(dentry, nd->flags);
 	} else {
 		dentry = __d_lookup(parent, &nd->last);
 		if (unlikely(!dentry))
 			return 0;
-		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
-			status = d_revalidate(dentry, nd->flags);
+		status = d_revalidate(dentry, nd->flags);
 	}
 	if (unlikely(status <= 0)) {
 		if (!status)
@@ -1636,8 +1639,7 @@ again:
 	if (IS_ERR(dentry))
 		goto out;
 	if (unlikely(!d_in_lookup(dentry))) {
-		if ((dentry->d_flags & DCACHE_OP_REVALIDATE) &&
-		    !(flags & LOOKUP_NO_REVAL)) {
+		if (!(flags & LOOKUP_NO_REVAL)) {
 			int error = d_revalidate(dentry, flags);
 			if (unlikely(error <= 0)) {
 				if (!error) {
@@ -1668,7 +1670,7 @@ static inline int may_lookup(struct nameidata *nd)
 		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
 		if (err != -ECHILD)
 			return err;
-		if (unlazy_walk(nd, NULL, 0))
+		if (unlazy_walk(nd))
 			return -ECHILD;
 	}
 	return inode_permission(nd->inode, MAY_EXEC);
@@ -1703,9 +1705,17 @@ static int pick_link(struct nameidata *nd, struct path *link,
 		error = nd_alloc_stack(nd);
 		if (unlikely(error)) {
 			if (error == -ECHILD) {
-				if (unlikely(unlazy_link(nd, link, seq)))
-					return -ECHILD;
-				error = nd_alloc_stack(nd);
+				if (unlikely(!legitimize_path(nd, link, seq))) {
+					drop_links(nd);
+					nd->depth = 0;
+					nd->flags &= ~LOOKUP_RCU;
+					nd->path.mnt = NULL;
+					nd->path.dentry = NULL;
+					if (!(nd->flags & LOOKUP_ROOT))
+						nd->root.mnt = NULL;
+					rcu_read_unlock();
+				} else if (likely(unlazy_walk(nd)) == 0)
+					error = nd_alloc_stack(nd);
 			}
 			if (error) {
 				path_put(link);
@@ -2122,7 +2132,7 @@ OK:
 		}
 		if (unlikely(!d_can_lookup(nd->path.dentry))) {
 			if (nd->flags & LOOKUP_RCU) {
-				if (unlazy_walk(nd, NULL, 0))
+				if (unlazy_walk(nd))
 					return -ECHILD;
 			}
 			return -ENOTDIR;
@@ -2579,7 +2589,7 @@ mountpoint_last(struct nameidata *nd)
 
 	/* If we're in rcuwalk, drop out of it to handle last component */
 	if (nd->flags & LOOKUP_RCU) {
-		if (unlazy_walk(nd, NULL, 0))
+		if (unlazy_walk(nd))
 			return -ECHILD;
 	}
 
@@ -3072,9 +3082,6 @@ static int lookup_open(struct nameidata *nd, struct path *path,
 		if (d_in_lookup(dentry))
 			break;
 
-		if (!(dentry->d_flags & DCACHE_OP_REVALIDATE))
-			break;
-
 		error = d_revalidate(dentry, nd->flags);
 		if (likely(error > 0))
 			break;
@@ -3356,13 +3363,50 @@ out:
 	return error;
 }
 
+struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
+{
+	static const struct qstr name = QSTR_INIT("/", 1);
+	struct dentry *child = NULL;
+	struct inode *dir = dentry->d_inode;
+	struct inode *inode;
+	int error;
+
+	/* we want directory to be writable */
+	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+	if (error)
+		goto out_err;
+	error = -EOPNOTSUPP;
+	if (!dir->i_op->tmpfile)
+		goto out_err;
+	error = -ENOMEM;
+	child = d_alloc(dentry, &name);
+	if (unlikely(!child))
+		goto out_err;
+	error = dir->i_op->tmpfile(dir, child, mode);
+	if (error)
+		goto out_err;
+	error = -ENOENT;
+	inode = child->d_inode;
+	if (unlikely(!inode))
+		goto out_err;
+	if (!(open_flag & O_EXCL)) {
+		spin_lock(&inode->i_lock);
+		inode->i_state |= I_LINKABLE;
+		spin_unlock(&inode->i_lock);
+	}
+	return child;
+
+out_err:
+	dput(child);
+	return ERR_PTR(error);
+}
+EXPORT_SYMBOL(vfs_tmpfile);
+
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
 		const struct open_flags *op,
 		struct file *file, int *opened)
 {
-	static const struct qstr name = QSTR_INIT("/", 1);
 	struct dentry *child;
-	struct inode *dir;
 	struct path path;
 	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
 	if (unlikely(error))
@@ -3370,25 +3414,12 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
 	error = mnt_want_write(path.mnt);
 	if (unlikely(error))
 		goto out;
-	dir = path.dentry->d_inode;
-	/* we want directory to be writable */
-	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
-	if (error)
+	child = vfs_tmpfile(path.dentry, op->mode, op->open_flag);
+	error = PTR_ERR(child);
+	if (unlikely(IS_ERR(child)))
 		goto out2;
-	if (!dir->i_op->tmpfile) {
-		error = -EOPNOTSUPP;
-		goto out2;
-	}
-	child = d_alloc(path.dentry, &name);
-	if (unlikely(!child)) {
-		error = -ENOMEM;
-		goto out2;
-	}
 	dput(path.dentry);
 	path.dentry = child;
-	error = dir->i_op->tmpfile(dir, child, op->mode);
-	if (error)
-		goto out2;
 	audit_inode(nd->name, child, 0);
 	/* Don't check for other permissions, the inode was just created */
 	error = may_open(&path, 0, op->open_flag);
@@ -3399,14 +3430,8 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
 	if (error)
 		goto out2;
 	error = open_check_o_direct(file);
-	if (error) {
+	if (error)
 		fput(file);
-	} else if (!(op->open_flag & O_EXCL)) {
-		struct inode *inode = file_inode(file);
-		spin_lock(&inode->i_lock);
-		inode->i_state |= I_LINKABLE;
-		spin_unlock(&inode->i_lock);
-	}
 out2:
 	mnt_drop_write(path.mnt);
 out:
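The fs/namei.c portion above also exports the O_TMPFILE creation logic as vfs_tmpfile(), so code outside do_tmpfile() (the overlayfs work mentioned in the merge summary) can create an unlinked file under a directory dentry. A rough sketch of a hypothetical in-kernel caller, assuming it already holds mnt_want_write() on the mount; example_tmpfile_inode() is invented here purely for illustration:

/* Hypothetical caller sketch, not part of this commit. */
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/err.h>

static struct inode *example_tmpfile_inode(struct dentry *dir_dentry)
{
	struct dentry *child;
	struct inode *inode;

	/* creates an unlinked inode under dir_dentry; without O_EXCL the
	 * inode is marked I_LINKABLE so it can later be linked into place */
	child = vfs_tmpfile(dir_dentry, S_IFREG | 0600, 0);
	if (IS_ERR(child))
		return ERR_CAST(child);

	/* keep the inode, drop the temporary dentry reference */
	inode = igrab(d_inode(child));
	dput(child);
	return inode;
}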
fs/open.c | 14
@@ -301,12 +301,10 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (S_ISFIFO(inode->i_mode))
 		return -ESPIPE;
 
-	/*
-	 * Let individual file system decide if it supports preallocation
-	 * for directories or not.
-	 */
-	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
-	    !S_ISBLK(inode->i_mode))
+	if (S_ISDIR(inode->i_mode))
+		return -EISDIR;
+
+	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
 		return -ENODEV;
 
 	/* Check for wrap through zero too */
@@ -316,7 +314,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (!file->f_op->fallocate)
 		return -EOPNOTSUPP;
 
-	sb_start_write(inode->i_sb);
+	file_start_write(file);
 	ret = file->f_op->fallocate(file, mode, offset, len);
 
 	/*
|
||||||
if (ret == 0)
|
if (ret == 0)
|
||||||
fsnotify_modify(file);
|
fsnotify_modify(file);
|
||||||
|
|
||||||
sb_end_write(inode->i_sb);
|
file_end_write(file);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vfs_fallocate);
|
EXPORT_SYMBOL_GPL(vfs_fallocate);
|
||||||
|
|
|
@@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb)
 	return &orangefs_inode->vfs_inode;
 }
 
+static void orangefs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+	kmem_cache_free(orangefs_inode_cache, orangefs_inode);
+}
+
 static void orangefs_destroy_inode(struct inode *inode)
 {
 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
|
||||||
"%s: deallocated %p destroying inode %pU\n",
|
"%s: deallocated %p destroying inode %pU\n",
|
||||||
__func__, orangefs_inode, get_khandle_from_ino(inode));
|
__func__, orangefs_inode, get_khandle_from_ino(inode));
|
||||||
|
|
||||||
kmem_cache_free(orangefs_inode_cache, orangefs_inode);
|
call_rcu(&inode->i_rcu, orangefs_i_callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
fs/read_write.c | 130
@@ -23,9 +23,6 @@
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 
-typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
-typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *);
-
 const struct file_operations generic_ro_fops = {
 	.llseek		= generic_file_llseek,
 	.read_iter	= generic_file_read_iter,
|
||||||
kiocb.ki_pos = *ppos;
|
kiocb.ki_pos = *ppos;
|
||||||
|
|
||||||
iter->type |= READ;
|
iter->type |= READ;
|
||||||
ret = file->f_op->read_iter(&kiocb, iter);
|
ret = call_read_iter(file, &kiocb, iter);
|
||||||
BUG_ON(ret == -EIOCBQUEUED);
|
BUG_ON(ret == -EIOCBQUEUED);
|
||||||
if (ret > 0)
|
if (ret > 0)
|
||||||
*ppos = kiocb.ki_pos;
|
*ppos = kiocb.ki_pos;
|
||||||
|
@ -390,7 +387,7 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
|
||||||
kiocb.ki_pos = *ppos;
|
kiocb.ki_pos = *ppos;
|
||||||
|
|
||||||
iter->type |= WRITE;
|
iter->type |= WRITE;
|
||||||
ret = file->f_op->write_iter(&kiocb, iter);
|
ret = call_write_iter(file, &kiocb, iter);
|
||||||
BUG_ON(ret == -EIOCBQUEUED);
|
BUG_ON(ret == -EIOCBQUEUED);
|
||||||
if (ret > 0)
|
if (ret > 0)
|
||||||
*ppos = kiocb.ki_pos;
|
*ppos = kiocb.ki_pos;
|
||||||
|
@ -439,7 +436,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
|
||||||
kiocb.ki_pos = *ppos;
|
kiocb.ki_pos = *ppos;
|
||||||
iov_iter_init(&iter, READ, &iov, 1, len);
|
iov_iter_init(&iter, READ, &iov, 1, len);
|
||||||
|
|
||||||
ret = filp->f_op->read_iter(&kiocb, &iter);
|
ret = call_read_iter(filp, &kiocb, &iter);
|
||||||
BUG_ON(ret == -EIOCBQUEUED);
|
BUG_ON(ret == -EIOCBQUEUED);
|
||||||
*ppos = kiocb.ki_pos;
|
*ppos = kiocb.ki_pos;
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -496,7 +493,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
|
||||||
kiocb.ki_pos = *ppos;
|
kiocb.ki_pos = *ppos;
|
||||||
iov_iter_init(&iter, WRITE, &iov, 1, len);
|
iov_iter_init(&iter, WRITE, &iov, 1, len);
|
||||||
|
|
||||||
ret = filp->f_op->write_iter(&kiocb, &iter);
|
ret = call_write_iter(filp, &kiocb, &iter);
|
||||||
BUG_ON(ret == -EIOCBQUEUED);
|
BUG_ON(ret == -EIOCBQUEUED);
|
||||||
if (ret > 0)
|
if (ret > 0)
|
||||||
*ppos = kiocb.ki_pos;
|
*ppos = kiocb.ki_pos;
|
||||||
|
@ -675,7 +672,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
|
||||||
EXPORT_SYMBOL(iov_shorten);
|
EXPORT_SYMBOL(iov_shorten);
|
||||||
|
|
||||||
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
|
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
|
||||||
loff_t *ppos, iter_fn_t fn, int flags)
|
loff_t *ppos, int type, int flags)
|
||||||
{
|
{
|
||||||
struct kiocb kiocb;
|
struct kiocb kiocb;
|
||||||
ssize_t ret;
|
ssize_t ret;
|
||||||
|
@ -692,7 +689,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
|
||||||
kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
|
kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
|
||||||
kiocb.ki_pos = *ppos;
|
kiocb.ki_pos = *ppos;
|
||||||
|
|
||||||
ret = fn(&kiocb, iter);
|
if (type == READ)
|
||||||
|
ret = call_read_iter(filp, &kiocb, iter);
|
||||||
|
else
|
||||||
|
ret = call_write_iter(filp, &kiocb, iter);
|
||||||
BUG_ON(ret == -EIOCBQUEUED);
|
BUG_ON(ret == -EIOCBQUEUED);
|
||||||
*ppos = kiocb.ki_pos;
|
*ppos = kiocb.ki_pos;
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -700,7 +700,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
|
||||||
|
|
||||||
/* Do it by hand, with file-ops */
|
/* Do it by hand, with file-ops */
|
||||||
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
|
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
|
||||||
loff_t *ppos, io_fn_t fn, int flags)
|
loff_t *ppos, int type, int flags)
|
||||||
{
|
{
|
||||||
ssize_t ret = 0;
|
ssize_t ret = 0;
|
||||||
|
|
||||||
|
@@ -711,7 +711,13 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
 		struct iovec iovec = iov_iter_iovec(iter);
 		ssize_t nr;
 
-		nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
+		if (type == READ) {
+			nr = filp->f_op->read(filp, iovec.iov_base,
+					      iovec.iov_len, ppos);
+		} else {
+			nr = filp->f_op->write(filp, iovec.iov_base,
+					       iovec.iov_len, ppos);
+		}
 
 		if (nr < 0) {
 			if (!ret)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static ssize_t do_readv_writev(int type, struct file *file,
|
static ssize_t __do_readv_writev(int type, struct file *file,
|
||||||
const struct iovec __user * uvector,
|
struct iov_iter *iter, loff_t *pos, int flags)
|
||||||
unsigned long nr_segs, loff_t *pos,
|
|
||||||
int flags)
|
|
||||||
{
|
{
|
||||||
size_t tot_len;
|
size_t tot_len;
|
||||||
struct iovec iovstack[UIO_FASTIOV];
|
ssize_t ret = 0;
|
||||||
struct iovec *iov = iovstack;
|
|
||||||
struct iov_iter iter;
|
|
||||||
ssize_t ret;
|
|
||||||
io_fn_t fn;
|
|
||||||
iter_fn_t iter_fn;
|
|
||||||
|
|
||||||
ret = import_iovec(type, uvector, nr_segs,
|
tot_len = iov_iter_count(iter);
|
||||||
ARRAY_SIZE(iovstack), &iov, &iter);
|
|
||||||
if (ret < 0)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
tot_len = iov_iter_count(&iter);
|
|
||||||
if (!tot_len)
|
if (!tot_len)
|
||||||
goto out;
|
goto out;
|
||||||
ret = rw_verify_area(type, file, pos, tot_len);
|
ret = rw_verify_area(type, file, pos, tot_len);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (type == READ) {
|
if (type != READ)
|
||||||
fn = file->f_op->read;
|
|
||||||
iter_fn = file->f_op->read_iter;
|
|
||||||
} else {
|
|
||||||
fn = (io_fn_t)file->f_op->write;
|
|
||||||
iter_fn = file->f_op->write_iter;
|
|
||||||
file_start_write(file);
|
file_start_write(file);
|
||||||
}
|
|
||||||
|
|
||||||
if (iter_fn)
|
if ((type == READ && file->f_op->read_iter) ||
|
||||||
ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
|
(type == WRITE && file->f_op->write_iter))
|
||||||
|
ret = do_iter_readv_writev(file, iter, pos, type, flags);
|
||||||
else
|
else
|
||||||
ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
|
ret = do_loop_readv_writev(file, iter, pos, type, flags);
|
||||||
|
|
||||||
if (type != READ)
|
if (type != READ)
|
||||||
file_end_write(file);
|
file_end_write(file);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
kfree(iov);
|
|
||||||
if ((ret + (type == READ)) > 0) {
|
if ((ret + (type == READ)) > 0) {
|
||||||
if (type == READ)
|
if (type == READ)
|
||||||
fsnotify_access(file);
|
fsnotify_access(file);
|
||||||
|
@ -887,6 +875,27 @@ out:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ssize_t do_readv_writev(int type, struct file *file,
|
||||||
|
const struct iovec __user *uvector,
|
||||||
|
unsigned long nr_segs, loff_t *pos,
|
||||||
|
int flags)
|
||||||
|
{
|
||||||
|
struct iovec iovstack[UIO_FASTIOV];
|
||||||
|
struct iovec *iov = iovstack;
|
||||||
|
struct iov_iter iter;
|
||||||
|
ssize_t ret;
|
||||||
|
|
||||||
|
ret = import_iovec(type, uvector, nr_segs,
|
||||||
|
ARRAY_SIZE(iovstack), &iov, &iter);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
ret = __do_readv_writev(type, file, &iter, pos, flags);
|
||||||
|
kfree(iov);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
|
ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
|
||||||
unsigned long vlen, loff_t *pos, int flags)
|
unsigned long vlen, loff_t *pos, int flags)
|
||||||
{
|
{
|
||||||
|
@ -1064,51 +1073,19 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
|
||||||
unsigned long nr_segs, loff_t *pos,
|
unsigned long nr_segs, loff_t *pos,
|
||||||
int flags)
|
int flags)
|
||||||
{
|
{
|
||||||
compat_ssize_t tot_len;
|
|
||||||
struct iovec iovstack[UIO_FASTIOV];
|
struct iovec iovstack[UIO_FASTIOV];
|
||||||
struct iovec *iov = iovstack;
|
struct iovec *iov = iovstack;
|
||||||
struct iov_iter iter;
|
struct iov_iter iter;
|
||||||
ssize_t ret;
|
ssize_t ret;
|
||||||
io_fn_t fn;
|
|
||||||
iter_fn_t iter_fn;
|
|
||||||
|
|
||||||
ret = compat_import_iovec(type, uvector, nr_segs,
|
ret = compat_import_iovec(type, uvector, nr_segs,
|
||||||
UIO_FASTIOV, &iov, &iter);
|
UIO_FASTIOV, &iov, &iter);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
tot_len = iov_iter_count(&iter);
|
ret = __do_readv_writev(type, file, &iter, pos, flags);
|
||||||
if (!tot_len)
|
|
||||||
goto out;
|
|
||||||
ret = rw_verify_area(type, file, pos, tot_len);
|
|
||||||
if (ret < 0)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
if (type == READ) {
|
|
||||||
fn = file->f_op->read;
|
|
||||||
iter_fn = file->f_op->read_iter;
|
|
||||||
} else {
|
|
||||||
fn = (io_fn_t)file->f_op->write;
|
|
||||||
iter_fn = file->f_op->write_iter;
|
|
||||||
file_start_write(file);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (iter_fn)
|
|
||||||
ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
|
|
||||||
else
|
|
||||||
ret = do_loop_readv_writev(file, &iter, pos, fn, flags);
|
|
||||||
|
|
||||||
if (type != READ)
|
|
||||||
file_end_write(file);
|
|
||||||
|
|
||||||
out:
|
|
||||||
kfree(iov);
|
kfree(iov);
|
||||||
if ((ret + (type == READ)) > 0) {
|
|
||||||
if (type == READ)
|
|
||||||
fsnotify_access(file);
|
|
||||||
else
|
|
||||||
fsnotify_modify(file);
|
|
||||||
}
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1518,6 +1495,11 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
|
||||||
if (flags != 0)
|
if (flags != 0)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
|
||||||
|
return -EISDIR;
|
||||||
|
if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
ret = rw_verify_area(READ, file_in, &pos_in, len);
|
ret = rw_verify_area(READ, file_in, &pos_in, len);
|
||||||
if (unlikely(ret))
|
if (unlikely(ret))
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1538,7 +1520,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
|
||||||
if (len == 0)
|
if (len == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
sb_start_write(inode_out->i_sb);
|
file_start_write(file_out);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Try cloning first, this is supported by more file systems, and
|
* Try cloning first, this is supported by more file systems, and
|
||||||
|
@ -1574,7 +1556,7 @@ done:
|
||||||
inc_syscr(current);
|
inc_syscr(current);
|
||||||
inc_syscw(current);
|
inc_syscw(current);
|
||||||
|
|
||||||
sb_end_write(inode_out->i_sb);
|
file_end_write(file_out);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -307,7 +307,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 	idx = to.idx;
 	init_sync_kiocb(&kiocb, in);
 	kiocb.ki_pos = *ppos;
-	ret = in->f_op->read_iter(&kiocb, &to);
+	ret = call_read_iter(in, &kiocb, &to);
 	if (ret > 0) {
 		*ppos = kiocb.ki_pos;
 		file_accessed(in);
@@ -192,7 +192,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
 		spin_unlock(&inode->i_lock);
 		mark_inode_dirty_sync(inode);
 	}
-	return file->f_op->fsync(file, start, end, datasync);
+	return call_fsync(file, start, end, datasync);
 }
 EXPORT_SYMBOL(vfs_fsync_range);
 
@@ -1567,6 +1567,9 @@ extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
 extern int vfs_whiteout(struct inode *, struct dentry *);
 
+extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
+				  int open_flag);
+
 /*
  * VFS file helper functions.
  */
|
||||||
int (*set_acl)(struct inode *, struct posix_acl *, int);
|
int (*set_acl)(struct inode *, struct posix_acl *, int);
|
||||||
} ____cacheline_aligned;
|
} ____cacheline_aligned;
|
||||||
|
|
||||||
|
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
|
||||||
|
struct iov_iter *iter)
|
||||||
|
{
|
||||||
|
return file->f_op->read_iter(kio, iter);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
|
||||||
|
struct iov_iter *iter)
|
||||||
|
{
|
||||||
|
return file->f_op->write_iter(kio, iter);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
return file->f_op->mmap(file, vma);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int call_fsync(struct file *file, loff_t start, loff_t end,
|
||||||
|
int datasync)
|
||||||
|
{
|
||||||
|
return file->f_op->fsync(file, start, end, datasync);
|
||||||
|
}
|
||||||
|
|
||||||
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
|
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
|
||||||
unsigned long nr_segs, unsigned long fast_segs,
|
unsigned long nr_segs, unsigned long fast_segs,
|
||||||
struct iovec *fast_pointer,
|
struct iovec *fast_pointer,
|
||||||
|
@@ -1744,19 +1770,6 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 extern int vfs_dedupe_file_range(struct file *file,
 				 struct file_dedupe_range *same);
 
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
-				      struct file *file_out, loff_t pos_out,
-				      u64 len)
-{
-	int ret;
-
-	sb_start_write(file_inode(file_out)->i_sb);
-	ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
-	sb_end_write(file_inode(file_out)->i_sb);
-
-	return ret;
-}
-
 struct super_operations {
 	struct inode *(*alloc_inode)(struct super_block *sb);
 	void (*destroy_inode)(struct inode *);
|
||||||
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
|
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
|
||||||
|
struct file *file_out, loff_t pos_out,
|
||||||
|
u64 len)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
file_start_write(file_out);
|
||||||
|
ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
|
||||||
|
file_end_write(file_out);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* get_write_access() gets write permission for a file.
|
* get_write_access() gets write permission for a file.
|
||||||
* put_write_access() releases this write permission.
|
* put_write_access() releases this write permission.
|
||||||
|
|
|
@@ -423,7 +423,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 	if (ret)
 		return ret;
 
-	ret = sfd->file->f_op->mmap(sfd->file, vma);
+	ret = call_mmap(sfd->file, vma);
 	if (ret) {
 		shm_close(vma);
 		return ret;
@@ -452,7 +452,7 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 
 	if (!sfd->file->f_op->fsync)
 		return -EINVAL;
-	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
+	return call_fsync(sfd->file, start, end, datasync);
 }
 
 static long shm_fallocate(struct file *file, int mode, loff_t offset,
@@ -1672,7 +1672,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		 * new file must not have been exposed to user-space, yet.
 		 */
 		vma->vm_file = get_file(file);
-		error = file->f_op->mmap(file, vma);
+		error = call_mmap(file, vma);
 		if (error)
 			goto unmap_and_free_vma;
 
@@ -1084,7 +1084,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
 {
 	int ret;
 
-	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+	ret = call_mmap(vma->vm_file, vma);
 	if (ret == 0) {
 		vma->vm_region->vm_top = vma->vm_region->vm_end;
 		return 0;
@@ -1115,7 +1115,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	 *   - VM_MAYSHARE will be set if it may attempt to share
 	 */
 	if (capabilities & NOMMU_MAP_DIRECT) {
-		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+		ret = call_mmap(vma->vm_file, vma);
 		if (ret == 0) {
 			/* shouldn't return success if we're not sharing */
 			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));