From: Greg Kurz
Subject: Re: [PATCH] virtiofsd: vu_dispatch locking should never fail
Date: Wed, 3 Feb 2021 17:08:57 +0100

On Wed, 3 Feb 2021 10:59:34 -0500
Vivek Goyal <vgoyal@redhat.com> wrote:

> On Fri, Jan 29, 2021 at 04:53:12PM +0100, Greg Kurz wrote:
> > pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if a
> > deadlock condition is detected or the current thread already owns
> > the lock. They can also fail, like pthread_rwlock_unlock(), if the
> > rwlock wasn't properly initialized. None of these are ever expected
> > to happen with fv_VuDev::vu_dispatch_rwlock.
> > 
> > Some users already check the return value and assert, some others
> > don't. Introduce rdlock/wrlock/unlock wrappers that just do the
> > former and use them everywhere.
> > 
> > Signed-off-by: Greg Kurz <groug@kaod.org>
> > ---
> >  tools/virtiofsd/fuse_virtio.c | 42 +++++++++++++++++++++++------------
> >  1 file changed, 28 insertions(+), 14 deletions(-)
> > 
> > diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
> > index ddcefee4272f..7ea269c4b65d 100644
> > --- a/tools/virtiofsd/fuse_virtio.c
> > +++ b/tools/virtiofsd/fuse_virtio.c
> > @@ -187,6 +187,24 @@ static void copy_iov(struct iovec *src_iov, int src_count,
> >      }
> >  }
> >  
> > +/*
> > + * pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if
> > + * a deadlock condition is detected or the current thread already
> > + * owns the lock. They can also fail, like pthread_rwlock_unlock(),
> > + * if the rwlock wasn't properly initialized. None of these are ever
> > + * expected to happen.
> > + */
> > +#define VU_DISPATCH_LOCK_OP(op)                              \
> > +static inline void vu_dispatch_##op(struct fv_VuDev *vud)    \
> > +{                                                            \
> > +    int ret = pthread_rwlock_##op(&vud->vu_dispatch_rwlock); \
> > +    assert(ret == 0);                                        \
> > +}
> > +
> > +VU_DISPATCH_LOCK_OP(rdlock);
> > +VU_DISPATCH_LOCK_OP(wrlock);
> > +VU_DISPATCH_LOCK_OP(unlock);
> > +
> 
> I generally prefer not to use macros to define functions, as searching
> for function declarations/definitions becomes harder. But I see a lot
> of people prefer that because it reduces the number of lines of code.
> 

Well, I must admit I hesitated since this doesn't gain much in
terms of LoC compared to the expanded version. I'm perfectly
fine with dropping the macro in my v2 if this looks better
to you.
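
For reference, dropping the macro would just mean spelling out the
three wrappers by hand, along these lines:

/* Expanded equivalents of VU_DISPATCH_LOCK_OP(rdlock/wrlock/unlock) */
static inline void vu_dispatch_rdlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_rdlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}

static inline void vu_dispatch_wrlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_wrlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}

static inline void vu_dispatch_unlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_unlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}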

> Apart from that one issue of using rdlock in fv_queue_thread() that
> Stefan pointed out, it looks good to me.
> 
> Reviewed-by: Vivek Goyal <vgoyal@redhat.com>
> 
> Vivek
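
For reference, the issue is in the fv_queue_thread() hunk quoted below:
the patch replaces the pthread_rwlock_rdlock() call with
vu_dispatch_wrlock(). Assuming v2 keeps the original reader semantics
there, that line would presumably become:

    vu_dispatch_rdlock(qi->virtio_dev);
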
> >  /*
> >   * Called back by ll whenever it wants to send a reply/message back
> >   * The 1st element of the iov starts with the fuse_out_header
> > @@ -240,12 +258,12 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
> >  
> >      copy_iov(iov, count, in_sg, in_num, tosend_len);
> >  
> > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_rdlock(qi->virtio_dev);
> >      pthread_mutex_lock(&qi->vq_lock);
> >      vu_queue_push(dev, q, elem, tosend_len);
> >      vu_queue_notify(dev, q);
> >      pthread_mutex_unlock(&qi->vq_lock);
> > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_unlock(qi->virtio_dev);
> >  
> >      req->reply_sent = true;
> >  
> > @@ -403,12 +421,12 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
> >  
> >      ret = 0;
> >  
> > -    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_rdlock(qi->virtio_dev);
> >      pthread_mutex_lock(&qi->vq_lock);
> >      vu_queue_push(dev, q, elem, tosend_len);
> >      vu_queue_notify(dev, q);
> >      pthread_mutex_unlock(&qi->vq_lock);
> > -    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +    vu_dispatch_unlock(qi->virtio_dev);
> >  
> >  err:
> >      if (ret == 0) {
> > @@ -558,12 +576,12 @@ out:
> >          fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
> >                   elem->index);
> >  
> > -        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_rdlock(qi->virtio_dev);
> >          pthread_mutex_lock(&qi->vq_lock);
> >          vu_queue_push(dev, q, elem, 0);
> >          vu_queue_notify(dev, q);
> >          pthread_mutex_unlock(&qi->vq_lock);
> > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(qi->virtio_dev);
> >      }
> >  
> >      pthread_mutex_destroy(&req->ch.lock);
> > @@ -596,7 +614,6 @@ static void *fv_queue_thread(void *opaque)
> >               qi->qidx, qi->kick_fd);
> >      while (1) {
> >          struct pollfd pf[2];
> > -        int ret;
> >  
> >          pf[0].fd = qi->kick_fd;
> >          pf[0].events = POLLIN;
> > @@ -645,8 +662,7 @@ static void *fv_queue_thread(void *opaque)
> >              break;
> >          }
> >          /* Mutual exclusion with virtio_loop() */
> > -        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > -        assert(ret == 0); /* there is no possible error case */
> > +        vu_dispatch_wrlock(qi->virtio_dev);
> >          pthread_mutex_lock(&qi->vq_lock);
> >          /* out is from guest, in is to guest */
> >          unsigned int in_bytes, out_bytes;
> > @@ -672,7 +688,7 @@ static void *fv_queue_thread(void *opaque)
> >          }
> >  
> >          pthread_mutex_unlock(&qi->vq_lock);
> > -        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(qi->virtio_dev);
> >  
> >          /* Process all the requests. */
> >          if (!se->thread_pool_size && req_list != NULL) {
> > @@ -799,7 +815,6 @@ int virtio_loop(struct fuse_session *se)
> >      while (!fuse_session_exited(se)) {
> >          struct pollfd pf[1];
> >          bool ok;
> > -        int ret;
> >          pf[0].fd = se->vu_socketfd;
> >          pf[0].events = POLLIN;
> >          pf[0].revents = 0;
> > @@ -825,12 +840,11 @@ int virtio_loop(struct fuse_session *se)
> >          assert(pf[0].revents & POLLIN);
> >          fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
> >          /* Mutual exclusion with fv_queue_thread() */
> > -        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
> > -        assert(ret == 0); /* there is no possible error case */
> > +        vu_dispatch_wrlock(se->virtio_dev);
> >  
> >          ok = vu_dispatch(&se->virtio_dev->dev);
> >  
> > -        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
> > +        vu_dispatch_unlock(se->virtio_dev);
> >  
> >          if (!ok) {
> >              fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
> > -- 
> > 2.26.2
> > 
> 
