From c23406ed77e0736e306e75700fafe3e63ea48091 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Mon, 21 Nov 2022 19:42:24 +0800 Subject: [PATCH 1/6] anolis: fscache: export fscache_object_wq ANBZ: #3213 ... in prep for the following failover feature for the on-demand mode of Cachefiles. Signed-off-by: Jingbo Xu --- fs/fscache/main.c | 1 + include/linux/fscache-cache.h | 1 + 2 files changed, 2 insertions(+) diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 4207f98e405f..a9f059220418 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -39,6 +39,7 @@ MODULE_PARM_DESC(fscache_debug, struct kobject *fscache_root; struct workqueue_struct *fscache_object_wq; +EXPORT_SYMBOL(fscache_object_wq); struct workqueue_struct *fscache_op_wq; DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 31f2f13e2924..f3ae78d1e5f3 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -74,6 +74,7 @@ struct fscache_cache { }; extern wait_queue_head_t fscache_cache_cleared_wq; +extern struct workqueue_struct *fscache_object_wq; /* * operation to be applied to a cache object -- Gitee From ec1cfef05e16f3820d8e2629884bc384971a1606 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Thu, 14 Jul 2022 11:05:12 +0800 Subject: [PATCH 2/6] anolis: cachefiles: introduce object ondemand state ANBZ: #3213 cherry-picked from https://lore.kernel.org/lkml/20221014080559.42108-5-zhujia.zj@bytedance.com/T/#m3b0f70a08a6e814e963eeeffe6c77585a2347662 Previously, @ondemand_id field was used not only to identify ondemand state of the object, but also to represent the index of the xarray. This commit introduces @state field to decouple the role of @ondemand_id and adds helpers to access it. 
Signed-off-by: Jia Zhu Reviewed-by: Xin Yin Reviewed-by: Jingbo Xu Signed-off-by: Jia Zhu Signed-off-by: Jingbo Xu --- fs/cachefiles/internal.h | 22 ++++++++++++++++++++++ fs/cachefiles/ondemand.c | 24 +++++++++++------------- 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index b8ef5be59005..20d4c41bed2e 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -31,6 +31,11 @@ extern unsigned cachefiles_debug; #define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC) +enum cachefiles_object_state { + CACHEFILES_ONDEMAND_OBJSTATE_close, /* Anonymous fd closed by daemon or initial state */ + CACHEFILES_ONDEMAND_OBJSTATE_open, /* Anonymous fd associated with object is available */ +}; + /* * node records */ @@ -50,6 +55,7 @@ struct cachefiles_object { struct rb_node active_node; /* link in active tree (dentry is key) */ #ifdef CONFIG_CACHEFILES_ONDEMAND int ondemand_id; + enum cachefiles_object_state state; #endif }; @@ -263,6 +269,22 @@ extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len); + +#define CACHEFILES_OBJECT_STATE_FUNCS(_state) \ +static inline bool \ +cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \ +{ \ + return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ +} \ + \ +static inline void \ +cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ +{ \ + object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ +} + +CACHEFILES_OBJECT_STATE_FUNCS(open); +CACHEFILES_OBJECT_STATE_FUNCS(close); #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen, loff_t *pos) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 
250b98e9820c..eb99e7675950 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -20,6 +20,8 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, xa_lock(&cache->reqs); object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + cachefiles_ondemand_set_object_close(object); + /* * Flush all pending READ requests since their completion depends on * anon_fd. @@ -167,6 +169,8 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) else set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + cachefiles_ondemand_set_object_open(req->object); + out: complete(&req->done); return ret; @@ -384,8 +388,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); - - if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { + if (opcode != CACHEFILES_OP_OPEN && + !cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object->ondemand_id == 0); xa_unlock(&cache->reqs); ret = -EIO; @@ -449,18 +453,11 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, void *private) { struct cachefiles_object *object = req->object; - int object_id = object->ondemand_id; - /* - * It's possible that object id is still 0 if the cookie looking up - * phase failed before OPEN request has ever been sent. Also avoid - * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means - * anon_fd has already been closed. - */ - if (object_id <= 0) + if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object_id; + req->msg.object_id = object->ondemand_id; return 0; } @@ -478,7 +475,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, int object_id = object->ondemand_id; /* Stop enqueuing requests when daemon has closed anon_fd. 
*/ - if (object_id <= 0) { + if (!cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object_id == 0); pr_info_once("READ: anonymous fd closed prematurely.\n"); return -EIO; @@ -501,7 +498,8 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) * creating a new tmpfile as the cache file. Reuse the previously * allocated object ID if any. */ - if (object->ondemand_id > 0 || object->type == FSCACHE_COOKIE_TYPE_INDEX) + if (cachefiles_ondemand_object_is_open(object) || + object->type == FSCACHE_COOKIE_TYPE_INDEX) return 0; volume_key_size = object->fscache.parent->cookie->key_len + 1; -- Gitee From 0e52da89ed87d7566ea625303726a1f9874d0902 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Thu, 18 Aug 2022 14:33:21 +0800 Subject: [PATCH 3/6] anolis: cachefiles: extract ondemand info field from cachefiles_object ANBZ: #3213 cherry-picked from https://lore.kernel.org/lkml/20221014080559.42108-5-zhujia.zj@bytedance.com/T/#m77ed5d2ac5a3b779402ff50c1cb66e281d7cb3b9 We'll introduce a @work_struct field for @object in subsequent patches, it will enlarge the size of @object. As the result of that, this commit extracts ondemand info field from @object. 
Signed-off-by: Jia Zhu Reviewed-by: Jingbo Xu Signed-off-by: Jia Zhu Signed-off-by: Jingbo Xu --- fs/cachefiles/interface.c | 6 ++++++ fs/cachefiles/internal.h | 22 ++++++++++++++++------ fs/cachefiles/ondemand.c | 29 +++++++++++++++++++++++------ 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 634e7041c0f3..0a946d046724 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -51,6 +51,9 @@ static struct fscache_object *cachefiles_alloc_object( fscache_object_init(&object->fscache, cookie, &cache->cache); + if (cachefiles_ondemand_init_obj_info(object)) + goto nomem_obj_info; + object->type = cookie->def->type; /* get hold of the raw key @@ -102,6 +105,8 @@ static struct fscache_object *cachefiles_alloc_object( nomem_key: kfree(buffer); nomem_buffer: + kfree(object->private); +nomem_obj_info: BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)); kmem_cache_free(cachefiles_object_jar, object); fscache_object_destroyed(&cache->cache); @@ -373,6 +378,7 @@ static void cachefiles_put_object(struct fscache_object *_object, } cache = object->fscache.cache; + kfree(object->private); fscache_object_destroy(&object->fscache); kmem_cache_free(cachefiles_object_jar, object); fscache_object_destroyed(cache); diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 20d4c41bed2e..c44782e12ab1 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -36,6 +36,12 @@ enum cachefiles_object_state { CACHEFILES_ONDEMAND_OBJSTATE_open, /* Anonymous fd associated with object is available */ }; +struct cachefiles_ondemand_info { + int ondemand_id; + enum cachefiles_object_state state; + struct cachefiles_object *object; +}; + /* * node records */ @@ -53,10 +59,7 @@ struct cachefiles_object { uint8_t new; /* T if object new */ spinlock_t work_lock; struct rb_node active_node; /* link in active tree (dentry is key) */ -#ifdef CONFIG_CACHEFILES_ONDEMAND - int 
ondemand_id; - enum cachefiles_object_state state; -#endif + struct cachefiles_ondemand_info *private; }; extern struct kmem_cache *cachefiles_object_jar; @@ -270,17 +273,19 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len); +extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object); + #define CACHEFILES_OBJECT_STATE_FUNCS(_state) \ static inline bool \ cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \ { \ - return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ + return object->private->state == CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ } \ \ static inline void \ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ { \ - object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ + object->private->state = CACHEFILES_ONDEMAND_OBJSTATE_##_state; \ } CACHEFILES_OBJECT_STATE_FUNCS(open); @@ -305,6 +310,11 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object, { return -EOPNOTSUPP; } + +static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object) +{ + return 0; +} #endif /* diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index eb99e7675950..9bd328a64bc8 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -9,17 +9,18 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, struct file *file) { struct cachefiles_object *object = file->private_data; - int object_id = object->ondemand_id; struct cachefiles_cache *cache; void **slot; struct radix_tree_iter iter; + struct cachefiles_ondemand_info *info = object->private; + int object_id = info->ondemand_id; struct cachefiles_req *req; cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); xa_lock(&cache->reqs); - object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + info->ondemand_id = 
CACHEFILES_ONDEMAND_ID_CLOSED; cachefiles_ondemand_set_object_close(object); /* @@ -221,7 +222,7 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) load = (void *)req->msg.data; load->fd = fd; req->msg.object_id = object_id; - object->ondemand_id = object_id; + object->private->ondemand_id = object_id; cachefiles_get_unbind_pincount(cache); return 0; @@ -390,7 +391,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, smp_mb(); if (opcode != CACHEFILES_OP_OPEN && !cachefiles_ondemand_object_is_open(object)) { - WARN_ON_ONCE(object->ondemand_id == 0); + WARN_ON_ONCE(object->private->ondemand_id == 0); xa_unlock(&cache->reqs); ret = -EIO; goto out; @@ -457,7 +458,7 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object->ondemand_id; + req->msg.object_id = object->private->ondemand_id; return 0; } @@ -472,7 +473,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, struct cachefiles_object *object = req->object; struct cachefiles_read *load = (void *)req->msg.data; struct cachefiles_read_ctx *read_ctx = private; - int object_id = object->ondemand_id; + int object_id = object->private->ondemand_id; /* Stop enqueuing requests when daemon has closed anon_fd. 
*/ if (!cachefiles_ondemand_object_is_open(object)) { @@ -525,3 +526,19 @@ int cachefiles_ondemand_read(struct cachefiles_object *object, sizeof(struct cachefiles_read), cachefiles_ondemand_init_read_req, &read_ctx); } + +int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object) +{ + struct cachefiles_cache *cache; + + cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); + if (!cachefiles_in_ondemand_mode(cache)) + return 0; + + object->private = kzalloc(sizeof(struct cachefiles_ondemand_info), GFP_KERNEL); + if (!object->private) + return -ENOMEM; + + object->private->object = object; + return 0; +} -- Gitee From 982f04f5aacecef6cea7be756a2ae11fbf442f00 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Thu, 14 Jul 2022 11:19:07 +0800 Subject: [PATCH 4/6] anolis: cachefiles: resend an open request if the read request's object is closed ANBZ: #3213 cherry-picked from https://lore.kernel.org/lkml/20221014080559.42108-5-zhujia.zj@bytedance.com/T/#m86518324ecc585eb1c9e22afa978b2af2169cfd0 When an anonymous fd is closed by user daemon, if there is a new read request for this file comes up, the anonymous fd should be re-opened to handle that read request rather than fail it directly. 1. Introduce reopening state for objects that are closed but have inflight/subsequent read requests. 2. No longer flush READ requests but only CLOSE requests when anonymous fd is closed. 3. Enqueue the reopen work to workqueue, thus user daemon could get rid of daemon_read context and handle that request smoothly. Otherwise, the user daemon will send a reopen request and wait for itself to process the request. 
Signed-off-by: Jia Zhu Reviewed-by: Xin Yin Reviewed-by: Jingbo Xu Signed-off-by: Jia Zhu Signed-off-by: Jingbo Xu --- fs/cachefiles/internal.h | 3 + fs/cachefiles/ondemand.c | 125 ++++++++++++++++++++++++--------------- 2 files changed, 79 insertions(+), 49 deletions(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index c44782e12ab1..9f460188e1a8 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -34,9 +34,11 @@ extern unsigned cachefiles_debug; enum cachefiles_object_state { CACHEFILES_ONDEMAND_OBJSTATE_close, /* Anonymous fd closed by daemon or initial state */ CACHEFILES_ONDEMAND_OBJSTATE_open, /* Anonymous fd associated with object is available */ + CACHEFILES_ONDEMAND_OBJSTATE_reopening, /* Object that was closed and is being reopened. */ }; struct cachefiles_ondemand_info { + struct work_struct work; int ondemand_id; enum cachefiles_object_state state; struct cachefiles_object *object; @@ -290,6 +292,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ CACHEFILES_OBJECT_STATE_FUNCS(open); CACHEFILES_OBJECT_STATE_FUNCS(close); +CACHEFILES_OBJECT_STATE_FUNCS(reopening); #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen, loff_t *pos) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 9bd328a64bc8..bff63ca2e691 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -23,18 +23,14 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; cachefiles_ondemand_set_object_close(object); - /* - * Flush all pending READ requests since their completion depends on - * anon_fd. 
- */ - radix_tree_for_each_slot(slot, &cache->reqs, &iter, 0) { + /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */ + radix_tree_for_each_tagged(slot, &cache->reqs, &iter, 0, CACHEFILES_REQ_NEW) { req = radix_tree_deref_slot_protected(slot, &cache->reqs.xa_lock); if (WARN_ON(!req)) continue; if (req->msg.object_id == object_id && - req->msg.opcode == CACHEFILES_OP_READ) { - req->error = -EIO; + req->msg.opcode == CACHEFILES_OP_CLOSE) { complete(&req->done); radix_tree_iter_delete(&cache->reqs, &iter, slot); } @@ -171,6 +167,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); cachefiles_ondemand_set_object_open(req->object); + wake_up_all(&cache->daemon_pollwq); out: complete(&req->done); @@ -221,7 +218,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) load = (void *)req->msg.data; load->fd = fd; - req->msg.object_id = object_id; object->private->ondemand_id = object_id; cachefiles_get_unbind_pincount(cache); @@ -239,16 +235,58 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) return ret; } +static void ondemand_object_worker(struct work_struct *work) +{ + struct cachefiles_object *object; + + object = ((struct cachefiles_ondemand_info *)work)->object; + cachefiles_ondemand_init_object(object); +} + +/* + * Find a request to be handled in the range of [start, end]. If there are any + * inflight or subsequent READ requests on the closed object, reopen it. Skip + * read requests whose related object is reopening. 
+ */ +static struct cachefiles_req *cachefiles_ondemand_select_req(struct cachefiles_cache *cache, + struct radix_tree_iter *iter, + unsigned long start, + unsigned long end) +{ + void **slot; + struct cachefiles_req *req; + struct cachefiles_ondemand_info *info; + + radix_tree_for_each_tagged(slot, &cache->reqs, iter, start, CACHEFILES_REQ_NEW) { + req = radix_tree_deref_slot_protected(slot, &cache->reqs.xa_lock); + if (WARN_ON(!req)) + return NULL; + if (iter->index > end) + return NULL; + if (req->msg.opcode != CACHEFILES_OP_READ) + return req; + info = req->object->private; + if (cachefiles_ondemand_object_is_close(req->object)) { + cachefiles_ondemand_set_object_reopening(req->object); + queue_work(fscache_object_wq, &info->work); + continue; + } else if (cachefiles_ondemand_object_is_reopening(req->object)) { + continue; + } + return req; + } + return NULL; +} + ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen, loff_t *pos) { - struct cachefiles_req *req = NULL; + struct cachefiles_req *req; struct cachefiles_msg *msg; unsigned long id = 0; size_t n; int ret = 0; struct radix_tree_iter iter; - void **slot; /* * Cyclically search for a request that has not ever been processed, @@ -256,25 +294,9 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, * request distribution fair. 
*/ xa_lock(&cache->reqs); - radix_tree_for_each_tagged(slot, &cache->reqs, &iter, cache->req_id_next, - CACHEFILES_REQ_NEW) { - req = radix_tree_deref_slot_protected(slot, &cache->reqs.xa_lock); - WARN_ON(!req); - break; - } - - if (!req && cache->req_id_next > 0) { - radix_tree_for_each_tagged(slot, &cache->reqs, &iter, 0, - CACHEFILES_REQ_NEW) { - if (iter.index >= cache->req_id_next) - break; - req = radix_tree_deref_slot_protected(slot, &cache->reqs.xa_lock); - WARN_ON(!req); - break; - } - } - - /* no request tagged with CACHEFILES_REQ_NEW found */ + req = cachefiles_ondemand_select_req(cache, &iter, cache->req_id_next, ULONG_MAX); + if (!req && cache->req_id_next > 0) + req = cachefiles_ondemand_select_req(cache, &iter, 0, cache->req_id_next - 1); if (!req) { xa_unlock(&cache->reqs); return 0; @@ -293,14 +315,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xa_unlock(&cache->reqs); id = iter.index; - msg->msg_id = id; if (msg->opcode == CACHEFILES_OP_OPEN) { ret = cachefiles_ondemand_get_fd(req); - if (ret) + if (ret) { + cachefiles_ondemand_set_object_close(req->object); goto error; + } } + msg->msg_id = id; + msg->object_id = req->object->private->ondemand_id; + if (copy_to_user(_buffer, msg, n) != 0) { ret = -EFAULT; goto err_put_fd; @@ -338,7 +364,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, { static atomic64_t global_index = ATOMIC64_INIT(0); struct cachefiles_cache *cache; - struct cachefiles_req *req; + struct cachefiles_req *req = NULL; long id; int ret; @@ -348,12 +374,16 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) return 0; - if (test_bit(CACHEFILES_DEAD, &cache->flags)) - return -EIO; + if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + ret = -EIO; + goto out; + } req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + if (!req) { + ret = -ENOMEM; + goto out; + } 
req->object = object; init_completion(&req->done); @@ -389,7 +419,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); - if (opcode != CACHEFILES_OP_OPEN && + if (opcode == CACHEFILES_OP_CLOSE && !cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object->private->ondemand_id == 0); xa_unlock(&cache->reqs); @@ -407,7 +437,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, wake_up_all(&cache->daemon_pollwq); wait_for_completion(&req->done); ret = req->error; + kfree(req); + return ret; out: + /* Reset the object to close state in error handling path. + * If error occurs after creating the anonymous fd, + * cachefiles_ondemand_fd_release() will set object to close. + */ + if (opcode == CACHEFILES_OP_OPEN) + cachefiles_ondemand_set_object_close(object); kfree(req); return ret; } @@ -457,8 +495,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - - req->msg.object_id = object->private->ondemand_id; return 0; } @@ -470,19 +506,9 @@ struct cachefiles_read_ctx { static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, void *private) { - struct cachefiles_object *object = req->object; struct cachefiles_read *load = (void *)req->msg.data; struct cachefiles_read_ctx *read_ctx = private; - int object_id = object->private->ondemand_id; - - /* Stop enqueuing requests when daemon has closed anon_fd. 
*/ - if (!cachefiles_ondemand_object_is_open(object)) { - WARN_ON_ONCE(object_id == 0); - pr_info_once("READ: anonymous fd closed prematurely.\n"); - return -EIO; - } - req->msg.object_id = object_id; load->off = read_ctx->off; load->len = read_ctx->len; return 0; @@ -540,5 +566,6 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object) return -ENOMEM; object->private->object = object; + INIT_WORK(&object->private->work, ondemand_object_worker); return 0; } -- Gitee From e23ff64ca4c7aa15da586ab6bb703f0836572f88 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Tue, 5 Jul 2022 19:37:17 +0800 Subject: [PATCH 5/6] anolis: cachefiles: narrow the scope of triggering EPOLLIN events in ondemand mode ANBZ: #3213 cherry-picked from https://lore.kernel.org/lkml/20221014080559.42108-5-zhujia.zj@bytedance.com/T/#m9cb8de9d34a8e366bbec1bc74e6ccf769fff6360 Don't trigger EPOLLIN when there are only reopening read requests in xarray. Suggested-by: Xin Yin Signed-off-by: Jia Zhu Reviewed-by: Jingbo Xu Signed-off-by: Jingbo Xu --- fs/cachefiles/daemon.c | 16 ++++++++++++++-- fs/cachefiles/internal.h | 12 ++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index b531373400d7..075c227a336b 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -359,14 +359,26 @@ static __poll_t cachefiles_daemon_poll(struct file *file, struct poll_table_struct *poll) { struct cachefiles_cache *cache = file->private_data; + struct cachefiles_req *req; + struct radix_tree_iter iter; __poll_t mask; + void **slot; poll_wait(file, &cache->daemon_pollwq, poll); mask = 0; if (cachefiles_in_ondemand_mode(cache)) { - if (!radix_tree_empty(&cache->reqs)) - mask |= EPOLLIN; + if (!radix_tree_empty(&cache->reqs)) { + radix_tree_for_each_tagged(slot, &cache->reqs, &iter, 0, + CACHEFILES_REQ_NEW) { + req = radix_tree_deref_slot_protected(slot, + &cache->reqs.xa_lock); + if (!cachefiles_ondemand_is_reopening_read(req)) { 
+ mask |= EPOLLIN; + break; + } + } + } } else { if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) mask |= EPOLLIN; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 9f460188e1a8..435e8168f26f 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -293,6 +293,13 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ CACHEFILES_OBJECT_STATE_FUNCS(open); CACHEFILES_OBJECT_STATE_FUNCS(close); CACHEFILES_OBJECT_STATE_FUNCS(reopening); + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return cachefiles_ondemand_object_is_reopening(req->object) && + req->msg.opcode == CACHEFILES_OP_READ; +} + #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen, loff_t *pos) @@ -318,6 +325,11 @@ static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *ob { return 0; } + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return false; +} #endif /* -- Gitee From 81923b43faa81dded12a97b024ea4e895e778dba Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Sun, 5 Jun 2022 21:09:29 +0800 Subject: [PATCH 6/6] anolis: cachefiles: add restore command to recover inflight ondemand read requests ANBZ: #3213 cherry-picked from https://lore.kernel.org/lkml/20221014080559.42108-5-zhujia.zj@bytedance.com/T/#mf15c2552bf5bbb1b5631c82e713cbfdb1b04b5d0 Previously, in ondemand read scenario, if the anonymous fd was closed by user daemon, inflight and subsequent read requests would return EIO. As long as the device connection is not released, user daemon can hold and restore inflight requests by setting the request flag to CACHEFILES_REQ_NEW. 
Suggested-by: Gao Xiang Signed-off-by: Jia Zhu Signed-off-by: Xin Yin Reviewed-by: Jingbo Xu [jingbo: use xas_for_each since radix_tree_iter_tag_set is unavailable] Signed-off-by: Jingbo Xu --- fs/cachefiles/daemon.c | 1 + fs/cachefiles/internal.h | 3 +++ fs/cachefiles/ondemand.c | 23 +++++++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 075c227a336b..4bb81e003ae1 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -75,6 +75,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = { { "tag", cachefiles_daemon_tag }, #ifdef CONFIG_CACHEFILES_ONDEMAND { "copen", cachefiles_ondemand_copen }, + { "restore", cachefiles_ondemand_restore }, #endif { "", NULL } }; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 435e8168f26f..f975042c1658 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -270,6 +270,9 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args); +extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache, + char *args); + extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index bff63ca2e691..7a6af4de88ed 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -174,6 +174,29 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) return ret; } +int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) +{ + struct cachefiles_req *req; + + XA_STATE(xas, &cache->reqs, 0); + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + /* + * Reset the requests to CACHEFILES_REQ_NEW state, so 
that the + * requests have been processed halfway before the crash of the + * user daemon could be reprocessed after the recovery. + */ + xas_lock(&xas); + xas_for_each(&xas, req, ULONG_MAX) + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + xas_unlock(&xas); + + wake_up_all(&cache->daemon_pollwq); + return 0; +} + static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) { struct cachefiles_object *object = req->object; -- Gitee