[Devel] [PATCH 3/8] fuse: do not take fc->lock in fuse_request_send_background()
Pavel Butsykin
pbutsykin at virtuozzo.com
Tue Apr 23 12:33:28 MSK 2019
On 23.04.2019 12:21, Kirill Tkhai wrote:
> On 23.04.2019 11:51, Pavel Butsykin wrote:
>>
>>
>> On 23.04.2019 11:40, Kirill Tkhai wrote:
>>> On 23.04.2019 11:36, Pavel Butsykin wrote:
>>>> On 23.04.2019 10:48, Kirill Tkhai wrote:
>>>>> On 23.04.2019 09:56, Pavel Butsykin wrote:
>>>>>>
>>>>>>
>>>>>> On 03.04.2019 18:37, Kirill Tkhai wrote:
>>>>>>> ms commit 63825b4e1da5
>>>>>>>
>>>>>>> Currently, we take fc->lock there only to check for fc->connected.
>>>>>>> But this flag is changed only on connection abort, which is very
>>>>>>> rare operation.
>>>>>>>
>>>>>>> So allow checking fc->connected under just fc->bg_lock and use this lock
>>>>>>> (as well as fc->lock) when resetting fc->connected.
>>>>>>>
>>>>>>> Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
>>>>>>> Signed-off-by: Miklos Szeredi <mszeredi at redhat.com>
>>>>>>> ---
>>>>>>> fs/fuse/dev.c | 73 +++++++++++++++++++++++++++---------------------------
>>>>>>> fs/fuse/file.c | 4 ++-
>>>>>>> fs/fuse/fuse_i.h | 3 +-
>>>>>>> 3 files changed, 41 insertions(+), 39 deletions(-)
>>>>>>>
>>>>>>> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
>>>>>>> index 1ffc10ff18ba..1355f4a0a8e4 100644
>>>>>>> --- a/fs/fuse/dev.c
>>>>>>> +++ b/fs/fuse/dev.c
>>>>>>> @@ -598,69 +598,70 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
>>>>>>> }
>>>>>>> EXPORT_SYMBOL_GPL(fuse_request_send);
>>>>>>>
>>>>>>> -/*
>>>>>>> - * Called under fc->lock
>>>>>>> - *
>>>>>>> - * fc->connected must have been checked previously
>>>>>>> - */
>>>>>>> -void fuse_request_send_background_nocheck(struct fuse_conn *fc,
>>>>>>> - struct fuse_req *req)
>>>>>>> +bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
>>>>>>> {
>>>>>>> struct fuse_iqueue *fiq = req->fiq;
>>>>>>> + bool queued = false;
>>>>>>>
>>>>>>> - BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
>>>>>>> -
>>>>>>> + WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
>>>>>>> if (!test_bit(FR_WAITING, &req->flags)) {
>>>>>>> __set_bit(FR_WAITING, &req->flags);
>>>>>>> atomic_inc(&fc->num_waiting);
>>>>>>> }
>>>>>>> __set_bit(FR_ISREPLY, &req->flags);
>>>>>>> spin_lock(&fc->bg_lock);
>>>>>>> - fc->num_background++;
>>>>>>> - if (fc->num_background == fc->max_background)
>>>>>>> - fc->blocked = 1;
>>>>>>> - if (fc->num_background == fc->congestion_threshold &&
>>>>>>> - fc->bdi_initialized) {
>>>>>>> - set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
>>>>>>> - set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
>>>>>>> - }
>>>>>>> + if (likely(fc->connected)) {
>>>>>>> + fc->num_background++;
>>>>>>> + if (fc->num_background == fc->max_background)
>>>>>>> + fc->blocked = 1;
>>>>>>> + if (fc->num_background == fc->congestion_threshold && fc->sb) {
>>>>>>> + set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
>>>>>>> + set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
>>>>>>> + }
>>>>>>>
>>>>>>> - if (test_bit(FR_NONBLOCKING, &req->flags)) {
>>>>>>> - fc->active_background++;
>>>>>>> - spin_lock(&fiq->waitq.lock);
>>>>>>> - req->in.h.unique = fuse_get_unique(fiq);
>>>>>>> - queue_request(fiq, req);
>>>>>>> - spin_unlock(&fiq->waitq.lock);
>>>>>>> - goto unlock;
>>>>>>> - }
>>>>>>> + if (test_bit(FR_NONBLOCKING, &req->flags)) {
>>>>>>> + fc->active_background++;
>>>>>>> + spin_lock(&fiq->waitq.lock);
>>>>>>> + req->in.h.unique = fuse_get_unique(fiq);
>>>>>>> + queue_request(fiq, req);
>>>>>>> + spin_unlock(&fiq->waitq.lock);
>>>>>>> + queued = true;
>>>>>>> + goto unlock;
>>>>>>> + }
>>>>>>>
>>>>>>> - list_add_tail(&req->list, &fc->bg_queue);
>>>>>>> - flush_bg_queue(fc, fiq);
>>>>>>> + list_add_tail(&req->list, &fc->bg_queue);
>>>>>>> + flush_bg_queue(fc, fiq);
>>>>>>> + queued = true;
>>>>>>> + }
>>>>>>> unlock:
>>>>>>> spin_unlock(&fc->bg_lock);
>>>>>>> + return queued;
>>>>>>> }
>>>>>>>
>>>>>>> void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
>>>>>>> {
>>>>>>> - BUG_ON(!req->end);
>>>>>>> + bool fail;
>>>>>>> + WARN_ON(!req->end);
>>>>>>>
>>>>>>> if (fc->kio.op && !fc->kio.op->req_send(fc, req, true, false))
>>>>>>> return;
>>>>>>>
>>>>>>> spin_lock(&fc->lock);
>>>>>>> - if (req->page_cache && req->ff &&
>>>>>>> - test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state)) {
>>>>>>> + fail = (req->page_cache && req->ff &&
>>>>>>> + test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state));
>>>>>>> + spin_unlock(&fc->lock);
>>>>>>> +
>>>>>>> + if (fail) {
>>>>>>> + /* FIXME */
>>>>>>
>>>>>> What needs to be fixed here?
>>>>>
>>>>> The commit aims to remove fc->lock from this function (hence its name, "fuse: do not
>>>>> take fc->lock in fuse_request_send_background()"). But the FUSE_S_FAIL_IMMEDIATELY
>>>>> vstorage crutch is taken under fc->lock, so the lock remains, and that is why the "FIXME" is there.
>>>>
>>>> It's already fi->lock there.
>>>>
>>>>> I think the lock is not needed, since the crutch's bit FUSE_S_FAIL_IMMEDIATELY
>>>>> is set under the lock, and we do not need it for the requests already queued, because
>>>>> fuse_invalidate_files() takes care of those after the bit is set. So I kept this logic,
>>>>> and you may decide whether you need the lock or not.
>>>>
>>>> Ok. Unfortunately, a lock is needed here, but it should be fc->bg_lock instead of
>>>> fc->lock, to synchronize test_bit(FUSE_S_FAIL_IMMEDIATELY) against
>>>> fc->bg_queue. Possible race:
>>>>
>>> Your formatting lost all spaces.
>>>
>>
>> One more try:
>>
>> fuse_request_send_background():                      fuse_invalidate_files():
>> test_bit(FUSE_S_FAIL_IMMEDIATELY, &ff->ff_state)
>>                                                      spin_lock(&fi->lock);
>>                                                      list_for_each_entry(ff, &fi->rw_files, rw_entry)
>>                                                        set_bit(FUSE_S_FAIL_IMMEDIATELY, &ff->ff_state);
>>                                                      spin_unlock(&fi->lock);
>>
>>                                                      spin_lock(&fc->lock);
>>
>>                                                      spin_lock(&fc->bg_lock);
>>                                                      fuse_kill_requests(fc, inode, &fc->bg_queue);
>>                                                      spin_unlock(&fc->bg_lock);
>>
>> fuse_request_queue_background():
>> spin_lock(&fc->bg_lock);
>> list_add_tail(&req->list, &fc->bg_queue);
>> flush_bg_queue(fc, fiq);
>> spin_unlock(&fc->bg_lock);
>>
>>                                                      spin_unlock(&fc->lock);
>
> Yeah, but fc->lock is still *not needed*.
I said: "lock is needed here, but fc->bg_lock instead of fc->lock".
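
To spell out the ordering I mean (just an illustrative sketch, not a patch; it boils
down to the same thing your fixup below does):

Invalidate side (as in the diagram above), after the bit has already been set:

        spin_lock(&fc->bg_lock);
        fuse_kill_requests(fc, inode, &fc->bg_queue);
        spin_unlock(&fc->bg_lock);

Queueing side, with the test placed inside the same fc->bg_lock section that
touches fc->bg_queue:

        spin_lock(&fc->bg_lock);
        if (req->page_cache && req->ff &&
            test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state))
                req->out.h.error = -EIO;        /* fail instead of queueing */
        else {
                list_add_tail(&req->list, &fc->bg_queue);
                flush_bg_queue(fc, fiq);
        }
        spin_unlock(&fc->bg_lock);

This way a request either sees the bit and fails, or it is already on fc->bg_queue
when fuse_kill_requests() runs, so nothing can slip past fuse_invalidate_files().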
> We may just move immediate checking logic into fuse_request_queue_background():
>
> fuse: do not take fc->lock in fuse_request_send_background() - fixup
>
> Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
> ---
> fs/fuse/dev.c | 24 ++++++------------------
> 1 file changed, 6 insertions(+), 18 deletions(-)
>
> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
> index 1355f4a0a8e4..bfc792c9b5dd 100644
> --- a/fs/fuse/dev.c
> +++ b/fs/fuse/dev.c
> @@ -610,7 +610,10 @@ bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
> }
> __set_bit(FR_ISREPLY, &req->flags);
> spin_lock(&fc->bg_lock);
> - if (likely(fc->connected)) {
> + if (unlikely(req->page_cache && req->ff &&
> + test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state)))
> + req->out.h.error = -EIO;
There's another place where synchronization with FUSE_S_FAIL_IMMEDIATELY will
still be broken:
/* Called under fi->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fi->lock)
__acquires(fi->lock)
{
        ...
        if (test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state)) {
> + else if (likely(fc->connected)) {
> fc->num_background++;
> if (fc->num_background == fc->max_background)
> fc->blocked = 1;
> @@ -640,29 +643,14 @@ bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req)
>
> void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
> {
> - bool fail;
> WARN_ON(!req->end);
>
> if (fc->kio.op && !fc->kio.op->req_send(fc, req, true, false))
> return;
>
> - spin_lock(&fc->lock);
> - fail = (req->page_cache && req->ff &&
> - test_bit(FUSE_S_FAIL_IMMEDIATELY, &req->ff->ff_state));
> - spin_unlock(&fc->lock);
> -
> - if (fail) {
> - /* FIXME */
> - BUG_ON(req->in.h.opcode != FUSE_READ);
> - req->out.h.error = -EIO;
> - __clear_bit(FR_BACKGROUND, &req->flags);
> - __clear_bit(FR_PENDING, &req->flags);
I'm not sure we can drop this stuff.
> - request_end(fc, req);
> - return;
> - }
> -
> if (!fuse_request_queue_background(fc, req)) {
> - req->out.h.error = -ENOTCONN;
> + if (!req->out.h.error)
> + req->out.h.error = -ENOTCONN;
> req->end(fc, req);
> fuse_put_request(fc, req);
> }
>
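
For reference, this is how fuse_request_send_background() ends up looking with the
fixup applied on top of the patch (reassembled from the hunks above, illustration only):

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        WARN_ON(!req->end);

        if (fc->kio.op && !fc->kio.op->req_send(fc, req, true, false))
                return;

        if (!fuse_request_queue_background(fc, req)) {
                /*
                 * Not queued: either the connection is gone, or the
                 * FUSE_S_FAIL_IMMEDIATELY check under fc->bg_lock has
                 * already set -EIO on the request.
                 */
                if (!req->out.h.error)
                        req->out.h.error = -ENOTCONN;
                req->end(fc, req);
                fuse_put_request(fc, req);
        }
}

Note that the old failure path's __clear_bit(FR_BACKGROUND) / __clear_bit(FR_PENDING)
and request_end() are gone here, which is exactly the part I'm not sure we can drop.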