Merge patch series "cachefiles: random bugfixes"
[email protected] <[email protected]> says:

This is the third version of this patch series; another patch set has been
folded into it to avoid confusion between the two:
(https://patchwork.kernel.org/project/linux-fsdevel/list/?series=854914)

We've been testing ondemand mode for cachefiles since January, and we're
almost done. We hit a lot of issues during that testing, and this patch
series fixes some of them. The patches have passed internal testing
without regression.

The following is a brief overview of the patches, see the patches for
more details.

Patch 1-2: Add the fscache_try_get_volume() helper function to avoid a
fscache_volume use-after-free on cache withdrawal.
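The new helper pins the volume only while its refcount is still non-zero,
so a withdrawal cannot race with the final put. A minimal sketch of its
shape (the real helper is added in fs/netfs/fscache_volume.c, which the
partial diff below does not show; the tracepoint call here follows the
existing fscache_volume trace conventions and is an assumption of this
sketch):

	struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
						      enum fscache_volume_trace where)
	{
		int ref;

		/* Fails, returning NULL, once the refcount has hit zero. */
		if (!__refcount_inc_not_zero(&volume->ref, &ref))
			return NULL;

		trace_fscache_volume(volume->debug_id, ref + 1, where);
		return volume;
	}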

Patch 3: Fix a race between cachefiles_lookup_cookie() and
cachefiles_withdraw_cache() that causes a cachefiles_volume
use-after-free.
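The shape of the fix, condensed from the fs/cachefiles/cache.c hunk below:
cachefiles_withdraw_volumes() now pins the volume's fscache cookie with
fscache_try_get_volume() before unhooking it, backing off while a
concurrent user such as cachefiles_lookup_cookie() still holds it:

	spin_lock(&cache->object_list_lock);
	if (!list_empty(&cache->volumes)) {
		volume = list_first_entry(&cache->volumes,
				struct cachefiles_volume, cache_link);
		vcookie = fscache_try_get_volume(volume->vcookie,
						 fscache_volume_get_withdraw);
		if (!vcookie) {
			/* Still in use: drop the lock, spin, and retry. */
			spin_unlock(&cache->object_list_lock);
			cpu_relax();
			continue;
		}
		list_del_init(&volume->cache_link);
	}
	spin_unlock(&cache->object_list_lock);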

Patch 4: Propagate error codes returned by vfs_getxattr() to avoid an
endless loop.
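Previously a negative return from vfs_getxattr() was swallowed: the caller
saw only a coherency mismatch, treated the entry as stale, and retried
forever. The fix, condensed from the fs/cachefiles/xattr.c hunks below, is
simply to hand the error code back:

	xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
			    buf, tlen);
	if (xlen != tlen) {
		if (xlen < 0) {
			/* Propagate e.g. -EIO to the caller instead of
			 * reporting a retryable mismatch. */
			ret = xlen;
		}
		...
	}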

Patch 5-7: A read request waiting for a reopen could be closed maliciously
before the reopen worker executes or is scheduled, so
ondemand_object_worker() may run after the info, the object, and even the
cache have been freed, triggering a use-after-free. Patch 7 therefore uses
cancel_work_sync() in cachefiles_ondemand_clean_object() to cancel the
reopen worker or wait for it to finish. Since it makes no sense to wait for
the daemon to complete a reopen request for an object that is being
dropped, and to keep that pointless wait from blocking cancel_work_sync(),
Patch 5 uses the new DROPPING state to fail, rather than enqueue, requests
that have not yet been sent, and Patch 6 flushes the current object's
outstanding requests before cancel_work_sync().
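In code, the resulting teardown ordering, condensed from
cachefiles_ondemand_clean_object() in the fs/cachefiles/ondemand.c hunk
below, is:

	cache = object->volume->cache;
	xa_lock(&cache->reqs);
	/* 1) Refuse to generate further requests for this object (Patch 5). */
	cachefiles_ondemand_set_object_dropping(object);
	/* 2) Flush the object's outstanding requests (Patch 6). */
	xa_for_each(&cache->reqs, index, req) {
		if (req->object == object) {
			req->error = -EIO;
			complete(&req->done);
			__xa_erase(&cache->reqs, index);
		}
	}
	xa_unlock(&cache->reqs);
	/* 3) Cancel the reopen worker or wait for it to finish (Patch 7). */
	cancel_work_sync(&object->ondemand->ondemand_work);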

Patch 8: Allocate msg_id cyclically so that msg_id reuse cannot mislead
the daemon and cause a hang.
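Condensed from the fs/cachefiles/ondemand.c hunk below: allocation now
scans for a free slot starting at the most recently issued id and wraps
around at most once, so a just-released id is not handed out again while
the daemon may still be acting on it:

	xas.xa_index = cache->msg_id_next;
	xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
	if (xas.xa_node == XAS_RESTART) {
		/* Wrap: search the [0, msg_id_next) range once. */
		xas.xa_index = 0;
		xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
	}
	if (xas.xa_node == XAS_RESTART)
		xas_set_err(&xas, -EBUSY);	/* every id currently in use */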

Patch 9: Hold xas_lock during polling to avoid a use-after-free when
dereferencing reqs. This issue triggered frequently in our tests, and we
found that Anolis 5.10 had already fixed it; so to keep the tests passing,
the patch is pushed upstream as well.
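The poll path previously walked the request xarray under rcu_read_lock(),
but requests are erased and freed under the xarray lock (see the teardown
sketch above), so RCU alone cannot keep *req alive. The fixed loop,
condensed from the fs/cachefiles/daemon.c hunk below:

	xas_lock(&xas);
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (!cachefiles_ondemand_is_reopening_read(req)) {
			mask |= EPOLLIN;
			break;
		}
	}
	xas_unlock(&xas);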

Baokun Li (7):
  netfs, fscache: export fscache_put_volume() and add
    fscache_try_get_volume()
  cachefiles: fix slab-use-after-free in fscache_withdraw_volume()
  cachefiles: fix slab-use-after-free in cachefiles_withdraw_cookie()
  cachefiles: propagate errors from vfs_getxattr() to avoid infinite
    loop
  cachefiles: stop sending new request when dropping object
  cachefiles: cancel all requests for the object that is being dropped
  cachefiles: cyclic allocation of msg_id to avoid reuse

Hou Tao (1):
  cachefiles: wait for ondemand_object_worker to finish when dropping
    object

Jingbo Xu (1):
  cachefiles: add missing lock protection when polling

 fs/cachefiles/cache.c          | 45 ++++++++++++++++++++++++++++-
 fs/cachefiles/daemon.c         |  4 +--
 fs/cachefiles/internal.h       |  3 ++
 fs/cachefiles/ondemand.c       | 52 ++++++++++++++++++++++++++++++----
 fs/cachefiles/volume.c         |  1 -
 fs/cachefiles/xattr.c          |  5 +++-
 fs/netfs/fscache_volume.c      | 14 +++++++++
 fs/netfs/internal.h            |  2 --
 include/linux/fscache-cache.h  |  6 ++++
 include/trace/events/fscache.h |  4 +++
 10 files changed, 123 insertions(+), 13 deletions(-)

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Christian Brauner <[email protected]>
brauner committed Jul 5, 2024
2 parents 22a40d1 + cf5bb09 commit eeb1798
Showing 23 changed files with 201 additions and 126 deletions.
45 changes: 44 additions & 1 deletion fs/cachefiles/cache.c
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/statfs.h>
 #include <linux/namei.h>
+#include <trace/events/fscache.h>
 #include "internal.h"
 
 /*
@@ -312,26 +313,67 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
 }
 
 /*
- * Withdraw volumes.
+ * Withdraw fscache volumes.
  */
+static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
+{
+	struct list_head *cur;
+	struct cachefiles_volume *volume;
+	struct fscache_volume *vcookie;
+
+	_enter("");
+retry:
+	spin_lock(&cache->object_list_lock);
+	list_for_each(cur, &cache->volumes) {
+		volume = list_entry(cur, struct cachefiles_volume, cache_link);
+
+		if (atomic_read(&volume->vcookie->n_accesses) == 0)
+			continue;
+
+		vcookie = fscache_try_get_volume(volume->vcookie,
+						 fscache_volume_get_withdraw);
+		if (vcookie) {
+			spin_unlock(&cache->object_list_lock);
+			fscache_withdraw_volume(vcookie);
+			fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+			goto retry;
+		}
+	}
+	spin_unlock(&cache->object_list_lock);
+
+	_leave("");
+}
+
+/*
+ * Withdraw cachefiles volumes.
+ */
 static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
 {
 	_enter("");
 
 	for (;;) {
+		struct fscache_volume *vcookie = NULL;
 		struct cachefiles_volume *volume = NULL;
 
 		spin_lock(&cache->object_list_lock);
 		if (!list_empty(&cache->volumes)) {
 			volume = list_first_entry(&cache->volumes,
 					struct cachefiles_volume, cache_link);
+			vcookie = fscache_try_get_volume(volume->vcookie,
+							 fscache_volume_get_withdraw);
+			if (!vcookie) {
+				spin_unlock(&cache->object_list_lock);
+				cpu_relax();
+				continue;
+			}
 			list_del_init(&volume->cache_link);
 		}
 		spin_unlock(&cache->object_list_lock);
 		if (!volume)
 			break;
 
 		cachefiles_withdraw_volume(volume);
+		fscache_put_volume(vcookie, fscache_volume_put_withdraw);
 	}
 
 	_leave("");
@@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
 	pr_info("File cache on %s unregistering\n", fscache->name);
 
 	fscache_withdraw_cache(fscache);
+	cachefiles_withdraw_fscache_volumes(cache);
 
 	/* we now have to destroy all the active objects pertaining to this
 	 * cache - which we do by passing them off to thread pool to be
4 changes: 2 additions & 2 deletions fs/cachefiles/daemon.c
@@ -366,14 +366,14 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
 
 	if (cachefiles_in_ondemand_mode(cache)) {
 		if (!xa_empty(&cache->reqs)) {
-			rcu_read_lock();
+			xas_lock(&xas);
 			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
 				if (!cachefiles_ondemand_is_reopening_read(req)) {
 					mask |= EPOLLIN;
 					break;
 				}
 			}
-			rcu_read_unlock();
+			xas_unlock(&xas);
 		}
 	} else {
 		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
3 changes: 3 additions & 0 deletions fs/cachefiles/internal.h
@@ -48,6 +48,7 @@ enum cachefiles_object_state {
 	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
 	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
+	CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
 };
 
 struct cachefiles_ondemand_info {
@@ -128,6 +129,7 @@ struct cachefiles_cache {
 	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
+	u32				msg_id_next;
 };
 
 static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
@@ -335,6 +337,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
 CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
 CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
 CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
 
 static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
 {
52 changes: 46 additions & 6 deletions fs/cachefiles/ondemand.c
@@ -517,7 +517,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 		 */
 		xas_lock(&xas);
 
-		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
+		    cachefiles_ondemand_object_is_dropping(object)) {
 			xas_unlock(&xas);
 			ret = -EIO;
 			goto out;
@@ -527,20 +528,32 @@
 		smp_mb();
 
 		if (opcode == CACHEFILES_OP_CLOSE &&
-		     !cachefiles_ondemand_object_is_open(object)) {
+		    !cachefiles_ondemand_object_is_open(object)) {
 			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 			xas_unlock(&xas);
 			ret = -EIO;
 			goto out;
 		}
 
-		xas.xa_index = 0;
+		/*
+		 * Cyclically find a free xas to avoid msg_id reuse that would
+		 * cause the daemon to successfully copen a stale msg_id.
+		 */
+		xas.xa_index = cache->msg_id_next;
 		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
+		if (xas.xa_node == XAS_RESTART) {
+			xas.xa_index = 0;
+			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
+		}
 		if (xas.xa_node == XAS_RESTART)
 			xas_set_err(&xas, -EBUSY);
+
 		xas_store(&xas, req);
-		xas_clear_mark(&xas, XA_FREE_MARK);
-		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+		if (xas_valid(&xas)) {
+			cache->msg_id_next = xas.xa_index + 1;
+			xas_clear_mark(&xas, XA_FREE_MARK);
+			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+		}
 		xas_unlock(&xas);
 	} while (xas_nomem(&xas, GFP_KERNEL));

@@ -568,7 +581,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	 * If error occurs after creating the anonymous fd,
 	 * cachefiles_ondemand_fd_release() will set object to close.
 	 */
-	if (opcode == CACHEFILES_OP_OPEN)
+	if (opcode == CACHEFILES_OP_OPEN &&
+	    !cachefiles_ondemand_object_is_dropping(object))
 		cachefiles_ondemand_set_object_close(object);
 	kfree(req);
 	return ret;
@@ -667,8 +681,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
 
 void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
 {
+	unsigned long index;
+	struct cachefiles_req *req;
+	struct cachefiles_cache *cache;
+
+	if (!object->ondemand)
+		return;
+
 	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
 			cachefiles_ondemand_init_close_req, NULL);
+
+	if (!object->ondemand->ondemand_id)
+		return;
+
+	/* Cancel all requests for the object that is being dropped. */
+	cache = object->volume->cache;
+	xa_lock(&cache->reqs);
+	cachefiles_ondemand_set_object_dropping(object);
+	xa_for_each(&cache->reqs, index, req) {
+		if (req->object == object) {
+			req->error = -EIO;
+			complete(&req->done);
+			__xa_erase(&cache->reqs, index);
+		}
+	}
+	xa_unlock(&cache->reqs);
+
+	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
+	cancel_work_sync(&object->ondemand->ondemand_work);
 }
 
 int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
1 change: 0 additions & 1 deletion fs/cachefiles/volume.c
@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
 
 void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
 {
-	fscache_withdraw_volume(volume->vcookie);
 	cachefiles_set_volume_xattr(volume);
 	__cachefiles_free_volume(volume);
 }
5 changes: 4 additions & 1 deletion fs/cachefiles/xattr.c
@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
 	if (xlen == 0)
 		xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
 	if (xlen != tlen) {
-		if (xlen < 0)
+		if (xlen < 0) {
+			ret = xlen;
 			trace_cachefiles_vfs_error(object, file_inode(file), xlen,
 						   cachefiles_trace_getxattr_error);
+		}
 		if (xlen == -EIO)
 			cachefiles_io_error_obj(
 				object,
@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 	xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
 	if (xlen != len) {
 		if (xlen < 0) {
+			ret = xlen;
 			trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
 						   cachefiles_trace_getxattr_error);
 			if (xlen == -EIO)
14 changes: 7 additions & 7 deletions fs/netfs/buffered_read.c
@@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
 			if (folio->index == rreq->no_unlock_folio &&
 			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-				_debug("no unlock");
+				kdebug("no unlock");
 			else
 				folio_unlock(folio);
 		}
@@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl)
 	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
 	int ret;
 
-	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+	kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
 
 	if (readahead_count(ractl) == 0)
 		return;
@@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 	struct folio *sink = NULL;
 	int ret;
 
-	_enter("%lx", folio->index);
+	kenter("%lx", folio->index);
 
 	rreq = netfs_alloc_request(mapping, file,
 				   folio_file_pos(folio), folio_size(folio),
@@ -508,7 +508,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 
 have_folio:
 	*_folio = folio;
-	_leave(" = 0");
+	kleave(" = 0");
 	return 0;
 
 error_put:
@@ -518,7 +518,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 		folio_unlock(folio);
 		folio_put(folio);
 	}
-	_leave(" = %d", ret);
+	kleave(" = %d", ret);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
@@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 	size_t flen = folio_size(folio);
 	int ret;
 
-	_enter("%zx @%llx", flen, start);
+	kenter("%zx @%llx", flen, start);
 
 	ret = -ENOMEM;
 
@@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 error_put:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 error:
-	_leave(" = %d", ret);
+	kleave(" = %d", ret);
 	return ret;
 }
 
12 changes: 6 additions & 6 deletions fs/netfs/buffered_write.c
@@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
 	struct netfs_group *group = netfs_folio_group(folio);
 	loff_t pos = folio_file_pos(folio);
 
-	_enter("");
+	kenter("");
 
 	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
 		return NETFS_FLUSH_CONTENT;
@@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
 					    flen, offset, part, maybe_trouble);
-		_debug("howto %u", howto);
+		kdebug("howto %u", howto);
 		switch (howto) {
 		case NETFS_JUST_PREFETCH:
 			ret = netfs_prefetch_for_write(file, folio, offset, part);
 			if (ret < 0) {
-				_debug("prefetch = %zd", ret);
+				kdebug("prefetch = %zd", ret);
 				goto error_folio_unlock;
 			}
 			break;
@@ -418,7 +418,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	}
 
 	iocb->ki_pos += written;
-	_leave(" = %zd [%zd]", written, ret);
+	kleave(" = %zd [%zd]", written, ret);
 	return written ? written : ret;
 
 error_folio_unlock:
@@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct netfs_inode *ictx = netfs_inode(inode);
 	ssize_t ret;
 
-	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+	kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
 
 	if (!iov_iter_count(from))
 		return 0;
@@ -528,7 +528,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	vm_fault_t ret = VM_FAULT_RETRY;
 	int err;
 
-	_enter("%lx", folio->index);
+	kenter("%lx", folio->index);
 
 	sb_start_pagefault(inode->i_sb);
 
2 changes: 1 addition & 1 deletion fs/netfs/direct_read.c
@@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
 	size_t orig_count = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
-	_enter("");
+	kenter("");
 
 	if (!orig_count)
 		return 0; /* Don't update atime */