[Devel] [PATCH 2/2] fs/fuse kio: simplify processing and sending kio requests
Kirill Tkhai
ktkhai at virtuozzo.com
Mon May 27 18:33:04 MSK 2019
On 27.05.2019 18:21, Pavel Butsykin wrote:
>
>
> On 27.05.2019 18:06, Kirill Tkhai wrote:
>> On 27.05.2019 14:59, Pavel Butsykin wrote:
>>> This patch simplifies kpcs_req_classify() and pcs_fuse_submit(), reducing the
>>> number of cases they have to handle for kio requests.
>>>
>>> It also lets us get rid of an extra context switch: previously, a kio request
>>> issued under bg_lock had to be deferred to the cc->work_queue list. After this
>>> patch, kpcs_req_send() only ever executes without fc->bg_lock held.
>>>
>>> Signed-off-by: Pavel Butsykin <pbutsykin at virtuozzo.com>
>>> ---
>>>  fs/fuse/dev.c                      | 30 ++++++++-------
>>>  fs/fuse/fuse_i.h                   |  2 +-
>>>  fs/fuse/kio/pcs/pcs_fuse_kdirect.c | 76 +++++++++++++-------------------------
>>>  3 files changed, 43 insertions(+), 65 deletions(-)
>>>
>>> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
>>> index 3e0fecbe8db3..1ed8537b2be9 100644
>>> --- a/fs/fuse/dev.c
>>> +++ b/fs/fuse/dev.c
>>> @@ -393,27 +393,39 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
>>>
>>>  static void flush_bg_queue(struct fuse_conn *fc, struct fuse_iqueue *fiq)
>>>  {
>>> +	struct fuse_req *req, *next;
>>> +	LIST_HEAD(kio_reqs);
>>> +
>>>  	while (fc->active_background < fc->max_background &&
>>>  	       !list_empty(&fc->bg_queue)) {
>>> -		struct fuse_req *req;
>>>
>>>  		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
>>> -		list_del_init(&req->list);
>>>  		fc->active_background++;
>>>
>>>  		if (fc->kio.op) {
>>>  			int ret = fc->kio.op->req_classify(fc, req, true, true);
>>>  			if (likely(!ret)) {
>>> -				fc->kio.op->req_send(fc, req, NULL, true, true);
>>> +				list_move_tail(&req->list, &kio_reqs);
>>>  				continue;
>>> -			} else if (ret < 0)
>>> +			} else if (ret < 0) {
>>> +				list_del_init(&req->list);
>>>  				continue;
>>> +			}
>>>  		}
>>> +		list_del_init(&req->list);
>>> +
>>>  		spin_lock(&fiq->waitq.lock);
>>>  		req->in.h.unique = fuse_get_unique(fiq);
>>>  		queue_request(fiq, req);
>>>  		spin_unlock(&fiq->waitq.lock);
>>>  	}
>>> +
>>> +	spin_unlock(&fc->bg_lock);
>>> +	list_for_each_entry_safe(req, next, &kio_reqs, list) {
>>> +		list_del_init(&req->list);
>>> +		fc->kio.op->req_send(fc, req, NULL, true);
>>> +	}
>>> +	spin_lock(&fc->bg_lock);
>>>  }
>>>
>>>  /*
>>> @@ -563,7 +575,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req,
>>>  	if (fc->kio.op) {
>>>  		int ret = fc->kio.op->req_classify(fc, req, false, false);
>>>  		if (likely(!ret))
>>> -			return fc->kio.op->req_send(fc, req, ff, false, false);
>>> +			return fc->kio.op->req_send(fc, req, ff, false);
>>>  		else if (ret < 0)
>>>  			return;
>>>  	}
>>> @@ -656,14 +668,6 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
>>>  {
>>>  	WARN_ON(!req->end);
>>>
>>> -	if (fc->kio.op) {
>>> -		int ret = fc->kio.op->req_classify(fc, req, true, false);
>>> -		if (likely(!ret))
>>> -			return fc->kio.op->req_send(fc, req, NULL, true, false);
>>> -		else if (ret < 0)
>>> -			return;
>>> -	}
>>> -
>>>  	if (!fuse_request_queue_background(fc, req)) {
>>>  		if (!req->out.h.error)
>>>  			req->out.h.error = -ENOTCONN;
>>> diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
>>> index 7978a1d891d2..34a4317a9689 100644
>>> --- a/fs/fuse/fuse_i.h
>>> +++ b/fs/fuse/fuse_i.h
>>> @@ -530,7 +530,7 @@ struct fuse_kio_ops {
>>>  	int (*req_classify)(struct fuse_conn *fc, struct fuse_req *req, bool bg,
>>>  			    bool locked);
>>>  	void (*req_send)(struct fuse_conn *fc, struct fuse_req *req,
>>> -			 struct fuse_file *ff, bool bg, bool locked);
>>> +			 struct fuse_file *ff, bool bg);
>>>
>>>  	/* Inode scope hooks */
>>>  	int (*file_open)(struct fuse_conn *fc, struct file *file,
>>> diff --git a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>>> index a101d3950373..c15b690b0d30 100644
>>> --- a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>>> +++ b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>>> @@ -947,7 +947,7 @@ out:
>>>  }
>>>
>>>  static void pcs_fuse_submit(struct pcs_fuse_cluster *pfc, struct fuse_req *req,
>>> -			    struct fuse_file *ff, bool async, bool lk)
>>> +			    struct fuse_file *ff)
>>>  {
>>>  	struct pcs_fuse_req *r = pcs_req_from_fuse(req);
>>>  	struct fuse_inode *fi = get_fuse_inode(req->io_inode);
>>> @@ -1040,11 +1040,7 @@ static void pcs_fuse_submit(struct pcs_fuse_cluster *pfc, struct fuse_req *req,
>>>  error:
>>>  	DTRACE("do fuse_request_end req:%p op:%d err:%d\n", req, req->in.h.opcode, req->out.h.error);
>>>
>>> -	if (lk)
>>> -		spin_unlock(&pfc->fc->bg_lock);
>>>  	request_end(pfc->fc, req);
>>> -	if (lk)
>>> -		spin_lock(&pfc->fc->bg_lock);
>>>  	return;
>>>
>>>  submit:
>>> @@ -1052,11 +1048,7 @@ submit:
>>>  		req->out.h.error = -EIO;
>>>  		goto error;
>>>  	}
>>> -
>>> -	if (async)
>>> -		pcs_cc_submit(ireq->cc, ireq);
>>> -	else
>>> -		ireq_process(ireq);
>>> +	ireq_process(ireq);
>>>  }
>>>
>>>  static void kpcs_setattr_end(struct fuse_conn *fc, struct fuse_req *req)
>>> @@ -1119,7 +1111,7 @@ static void _pcs_shrink_end(struct fuse_conn *fc, struct fuse_req *req)
>>>
>>>  		TRACE("resubmit %p\n", &r->req);
>>>  		list_del_init(&ireq->list);
>>> -		pcs_fuse_submit(pfc, &r->req, r->req.ff, true, false);
>>> +		pcs_fuse_submit(pfc, &r->req, r->req.ff);
>>>  	}
>>>  }
>>>
>>> @@ -1232,64 +1224,46 @@ static int kpcs_req_classify(struct fuse_conn* fc, struct fuse_req *req,
>>>  		return 1;
>>>
>>>  	BUG_ON(!pfc);
>>> -	/* At this point request can not belongs to any list
>>> -	 * so we can avoid grab fc->lock here at all.
>>> -	 */
>>> -	BUG_ON(!list_empty(&req->list));
>>> -
>>>  	DTRACE("Classify req:%p op:%d end:%p bg:%d lk:%d\n", req, req->in.h.opcode,
>>>  	       req->end, bg, lk);
>>>  	ret = pcs_kio_classify_req(fc, req, lk);
>>> -	if (ret) {
>>> -		if (ret < 0) {
>>> -			if (!bg)
>>> -				atomic_inc(&req->count);
>>> -			__clear_bit(FR_PENDING, &req->flags);
>>> -			req->out.h.error = ret;
>>> -			if (lk)
>>> -				spin_unlock(&fc->bg_lock);
>>> -			request_end(fc, req);
>>> -			if (lk)
>>> -				spin_lock(&fc->bg_lock);
>>> -			return ret;
>>> -		}
>>> -		return 1;
>>> -	}
>>> +	if (likely(!ret))
>>> +		return 0;
>>>
>>> -	if (!lk) {
>>> -		spin_lock(&fc->bg_lock);
>>> -		if (fc->num_background + 1 >= fc->max_background ||
>>> -		    !fc->connected) {
>>> +	if (ret < 0) {
>>> +		if (!bg)
>>> +			atomic_inc(&req->count);
>>> +		__clear_bit(FR_PENDING, &req->flags);
>>> +		req->out.h.error = ret;
>>> +		if (lk)
>>>  			spin_unlock(&fc->bg_lock);
>>> -			return 1;
>>> -		}
>>> -		fc->num_background++;
>>> -		fc->active_background++;
>>> -
>>> -		if (fc->num_background == fc->congestion_threshold &&
>>> -		    fc->bdi_initialized) {
>>> -			set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
>>> -			set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
>>> -		}
>>> -		spin_unlock(&fc->bg_lock);
>>> +		request_end(fc, req);
>>> +		if (lk)
>>> +			spin_lock(&fc->bg_lock);
>>> +		return ret;
>>>  	}
>>> -	return 0;
>>> +	return 1;
>>>  }
>>>
>>>  static void kpcs_req_send(struct fuse_conn* fc, struct fuse_req *req,
>>> -			  struct fuse_file *ff, bool bg, bool lk)
>>> +			  struct fuse_file *ff, bool bg)
>>>  {
>>>  	struct pcs_fuse_cluster *pfc = (struct pcs_fuse_cluster*)fc->kio.ctx;
>>>
>>> -	TRACE("Send req:%p op:%d end:%p bg:%d lk:%d\n",
>>> -	      req, req->in.h.opcode, req->end, bg, lk);
>>> +	/* At this point request can not belongs to any list
>>> +	 * so we can avoid grab fc->lock here at all.
>>> +	 */
>>> +	BUG_ON(!list_empty(&req->list));
>>> +
>>> +	TRACE("Send req:%p op:%d end:%p bg:%d\n",
>>> +	      req, req->in.h.opcode, req->end, bg);
>>>
>>>  	/* request_end below will do fuse_put_request() */
>>>  	if (!bg)
>>>  		atomic_inc(&req->count);
>>>  	__clear_bit(FR_PENDING, &req->flags);
>>>
>>> -	pcs_fuse_submit(pfc, req, ff ? : req->ff, lk, lk);
>>> +	pcs_fuse_submit(pfc, req, ff ? : req->ff);
>>>  	if (!bg)
>>>  		wait_event(req->waitq,
>>>  			   test_bit(FR_FINISHED, &req->flags) && !req->end);
>>>
>>
>> Possible further simplification (up to you). This should go in a separate patch,
>> if we decide to do it.
>>
>> diff --git a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>> index c15b690b0d30..7fe0d764fdab 100644
>> --- a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>> +++ b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
>> @@ -1230,9 +1230,11 @@ static int kpcs_req_classify(struct fuse_conn* fc, struct fuse_req *req,
>>  	if (likely(!ret))
>>  		return 0;
>>
>> +	/* request_end below will do fuse_put_request() */
>> +	if (!bg)
>> +		atomic_inc(&req->count);
>> +
>
> In case ret == 1 we will have a req leak. We could do it like this:
1 is the plain fuse path, then OK.
> if (ret == 1)
> 	return ret;
>
> /* request_end below will do fuse_put_request() */
> if (!bg)
> 	atomic_inc(&req->count);
>
> if (!ret)
> 	return ret;
>
> ...
> handle error case
> ...
>
> But I don't like an additional 'if' on the most probable path
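
To make the refcounting concern concrete, here is Pavel's proposed flow consolidated into a minimal sketch of the kpcs_req_classify() tail. It is assembled from the hunks quoted above and is untested; it also assumes the matching atomic_inc() is then dropped from kpcs_req_send(), which Kirill's fragment implies but does not show:

	ret = pcs_kio_classify_req(fc, req, lk);
	if (ret == 1)		/* plain fuse path: the extra 'if' Pavel dislikes;
				 * taking the reference here would leak it */
		return 1;

	/* request_end will do fuse_put_request() on both remaining paths */
	if (!bg)
		atomic_inc(&req->count);

	if (likely(!ret))
		return 0;	/* kio path: the reference is dropped when the
				 * request completes */

	/* ret < 0: fail the request right away */
	__clear_bit(FR_PENDING, &req->flags);
	req->out.h.error = ret;
	if (lk)
		spin_unlock(&fc->bg_lock);
	request_end(fc, req);
	if (lk)
		spin_lock(&fc->bg_lock);
	return ret;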
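
Stepping back to the main patch: the flush_bg_queue() change is the usual collect-under-lock, dispatch-after-unlock pattern. Requests classified for kio are moved onto the private kio_reqs list while fc->bg_lock is held, and req_send() runs only after the lock is dropped, so the send path no longer needs a lk flag or a deferred work queue. A standalone userspace sketch of the same pattern (plain C with pthreads; all names here are illustrative, this is not kernel code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct work {
		struct work *next;
		int id;
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct work *queue_head;	/* LIFO, for brevity */

	static void enqueue(int id)
	{
		struct work *w = malloc(sizeof(*w));

		w->id = id;
		pthread_mutex_lock(&queue_lock);
		w->next = queue_head;
		queue_head = w;
		pthread_mutex_unlock(&queue_lock);
	}

	/* May block for a long time, so it must run without queue_lock. */
	static void handle(struct work *w)
	{
		printf("handling work %d\n", w->id);
		free(w);
	}

	static void flush_queue(void)
	{
		struct work *batch;

		/* Phase 1: detach everything onto a private list under the lock. */
		pthread_mutex_lock(&queue_lock);
		batch = queue_head;
		queue_head = NULL;
		pthread_mutex_unlock(&queue_lock);

		/* Phase 2: process the private batch with no lock held. */
		while (batch) {
			struct work *next = batch->next;

			handle(batch);
			batch = next;
		}
	}

	int main(void)
	{
		enqueue(1);
		enqueue(2);
		flush_queue();
		return 0;
	}

The one difference in the patch itself is that flush_bg_queue() is entered with fc->bg_lock already held, so it drops and re-takes that lock around the dispatch loop instead of taking it internally.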