ALSA: seq: More protection for concurrent write and ioctl races
author Takashi Iwai <tiwai@suse.de>
Mon, 5 Mar 2018 21:06:09 +0000 (22:06 +0100)
committer Takashi Iwai <tiwai@suse.de>
Thu, 8 Mar 2018 11:05:37 +0000 (12:05 +0100)
This patch is a further hardening against races between concurrent
write and ioctl calls.  The previous fix, commit d15d662e89fc ("ALSA:
seq: Fix racy pool initializations"), covered the race between the
pool initialization in the writer and the pool resize ioctl by taking
client->ioctl_mutex (CVE-2018-1000004).  However, this mutex really
needs to be applied to the whole write operation, so that another
thread cannot perform unexpected pool operations in the meantime.

The only change outside snd_seq_write() is the additional mutex
argument to the helper functions, so that the given mutex can be
unlocked and relocked temporarily around the schedule() call in the
blocking write path; a simplified sketch of this pattern follows.
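
Below is a minimal user-space sketch of the same unlock-around-sleep
idea, written with pthreads rather than kernel primitives.  All names
here (struct pool, cell_alloc_blocking(), has_space) are hypothetical
illustration, not the ALSA code; in the patch itself the sleep is a
plain schedule() on pool->output_sleep rather than a condition
variable.

	#include <pthread.h>

	struct pool {
		pthread_mutex_t lock;      /* protects free_cells */
		pthread_cond_t  has_space; /* signalled when a cell is freed */
		int             free_cells;
	};

	/*
	 * Wait for a free cell.  mutexp is the caller's outer mutex
	 * (NULL if the caller holds none, mirroring the new argument
	 * added by this patch).  It is dropped while we sleep, so a
	 * concurrent ioctl contending for that mutex can still make
	 * progress, and it is re-taken before returning to the caller.
	 */
	static int cell_alloc_blocking(struct pool *p, pthread_mutex_t *mutexp)
	{
		pthread_mutex_lock(&p->lock);
		while (p->free_cells == 0) {
			if (mutexp)
				pthread_mutex_unlock(mutexp);
			/* atomically drops p->lock and sleeps, like the
			 * unlock / schedule() pair in the patch */
			pthread_cond_wait(&p->has_space, &p->lock);
			if (mutexp) {
				/* re-take in outer-then-inner order to
				 * keep the lock hierarchy deadlock-free */
				pthread_mutex_unlock(&p->lock);
				pthread_mutex_lock(mutexp);
				pthread_mutex_lock(&p->lock);
			}
		}
		p->free_cells--;
		pthread_mutex_unlock(&p->lock);
		return 0;
	}

The reason for passing the mutex down to the helpers, instead of
simply dropping it in snd_seq_write(), is that the lock must cover the
whole pool access and may only be released at the one well-defined
sleeping point.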

Fixes: d15d662e89fc ("ALSA: seq: Fix racy pool initializations")
Reported-by: 范龙飞 <long7573@126.com>
Reported-by: Nicolai Stange <nstange@suse.de>
Reviewed-and-tested-by: Nicolai Stange <nstange@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_fifo.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_memory.h

diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index d41ce3ed62ca9ce7ccf235868f8260de92f11686..1b62421dadd1e20459a35adb9fff40952c5d4edc 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
                                        struct snd_seq_event *event,
                                        struct file *file, int blocking,
-                                       int atomic, int hop)
+                                       int atomic, int hop,
+                                       struct mutex *mutexp)
 {
        struct snd_seq_event_cell *cell;
        int err;
@@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
                return -ENXIO; /* queue is not allocated */
 
        /* allocate an event cell */
-       err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+       err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+                               file, mutexp);
        if (err < 0)
                return err;
 
@@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                return -ENXIO;
 
        /* allocate the pool now if the pool is not allocated yet */ 
+       mutex_lock(&client->ioctl_mutex);
        if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
-               mutex_lock(&client->ioctl_mutex);
                err = snd_seq_pool_init(client->pool);
-               mutex_unlock(&client->ioctl_mutex);
                if (err < 0)
-                       return -ENOMEM;
+                       goto out;
        }
 
        /* only process whole events */
@@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                /* ok, enqueue it */
                err = snd_seq_client_enqueue_event(client, &event, file,
                                                   !(file->f_flags & O_NONBLOCK),
-                                                  0, 0);
+                                                  0, 0, &client->ioctl_mutex);
                if (err < 0)
                        break;
 
@@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
                written += len;
        }
 
+ out:
+       mutex_unlock(&client->ioctl_mutex);
        return written ? written : err;
 }
 
@@ -2263,7 +2266,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
        if (! cptr->accept_output)
                result = -EPERM;
        else /* send it */
-               result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
+               result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
+                                                     atomic, hop, NULL);
 
        snd_seq_client_unlock(cptr);
        return result;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index a8c2822e01984ff207c8a46b5ebd67546ee8b2eb..72c0302a55d23c05720d6062bef600b40fec6971 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
                return -EINVAL;
 
        snd_use_lock_use(&f->use_lock);
-       err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+       err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
        if (err < 0) {
                if ((err == -ENOMEM) || (err == -EAGAIN))
                        atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index f763682584a8f09837240b683a2da5413149b728..ab1112e90f88dbd29bae5eea8edd175175504edc 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
  */
 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                              struct snd_seq_event_cell **cellp,
-                             int nonblock, struct file *file)
+                             int nonblock, struct file *file,
+                             struct mutex *mutexp)
 {
        struct snd_seq_event_cell *cell;
        unsigned long flags;
@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&pool->output_sleep, &wait);
                spin_unlock_irq(&pool->lock);
+               if (mutexp)
+                       mutex_unlock(mutexp);
                schedule();
+               if (mutexp)
+                       mutex_lock(mutexp);
                spin_lock_irq(&pool->lock);
                remove_wait_queue(&pool->output_sleep, &wait);
                /* interrupted? */
@@ -287,7 +292,7 @@ __error:
  */
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                      struct snd_seq_event_cell **cellp, int nonblock,
-                     struct file *file)
+                     struct file *file, struct mutex *mutexp)
 {
        int ncells, err;
        unsigned int extlen;
@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
        if (ncells >= pool->total_elements)
                return -ENOMEM;
 
-       err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
+       err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
        if (err < 0)
                return err;
 
@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                        int size = sizeof(struct snd_seq_event);
                        if (len < size)
                                size = len;
-                       err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
+                       err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
+                                                mutexp);
                        if (err < 0)
                                goto __error;
                        if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786d9ac8c071ba0e6fd070dc06da78b..3abe306c394af95c8dbdb99475f7829a17efc33c 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
 
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
-                     struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
+                     struct snd_seq_event_cell **cellp, int nonblock,
+                     struct file *file, struct mutex *mutexp);
 
 /* return number of unused (free) cells */
 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)