On Friday, 15 May 2015 at 14:46:26, Herbert Xu wrote:

Hi Herbert,

> On Wed, May 13, 2015 at 09:54:41PM +0200, Stephan Mueller wrote:
> >  /*
> > 
> > + * Equivalent function to get_random_bytes with the difference that this
> > + * function blocks the request until the nonblocking_pool is initialized.
> > + */
> > +void get_blocking_random_bytes(void *buf, int nbytes)
> > +{
> > +   if (unlikely(nonblocking_pool.initialized == 0))
> > +           wait_event_interruptible(urandom_init_wait,
> > +                                    nonblocking_pool.initialized);
> > +   extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
> 
> So what if the wait was interrupted? You are going to extract
> entropy from an empty pool.
> 
> Anyway, you still haven't addressed my primary concern with this
> model which is the potential for dead-lock.  Sleeping for an open
> period of time like this in a work queue is bad form.  It may also
> lead to dead-locks if whatever you're waiting for happened to use
> the same work thread.
> 
> That's why I think you should simply provide a function and data
> pointer which random.c can then stash onto a list to call when
> the pool is ready.

Thanks for the hint about using a list. Before handing in another formal patch, may
I ask you to check the following approach? I think it should address your
concerns.

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 9cd6968..9bc2a57 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -409,6 +409,19 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
+static LIST_HEAD(random_wait_list);
+static DEFINE_MUTEX(random_wait_list_mutex);
+struct random_work {
+       struct list_head        list;
+       struct work_struct      rw_work;
+       void                    *rw_buf;
+       int                     rw_len;
+       void                    *rw_private;
+       void                    (*rw_cb)(void *buf, int buflen,
+                                        void *private);
+};
+static void process_random_waiters(void);
+
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -660,6 +673,7 @@ retry:
                r->entropy_total = 0;
                if (r == &nonblocking_pool) {
                        prandom_reseed_late();
+                       process_random_waiters();
                        wake_up_interruptible(&urandom_init_wait);
                        pr_notice("random: %s pool is initialized\n", r->name);
                }
@@ -1778,3 +1792,64 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
        credit_entropy_bits(poolp, entropy);
 }
 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+static void process_random_waiters(void)
+{
+       struct random_work *rw = NULL;
+
+       mutex_lock(&random_wait_list_mutex);
+       while (!list_empty(&random_wait_list)) {
+               rw = list_first_entry(&random_wait_list, struct random_work,
+                                     list);
+               list_del(&rw->list);
+               schedule_work(&rw->rw_work);
+       }
+       mutex_unlock(&random_wait_list_mutex);
+}
+
+static void get_blocking_random_bytes_work(struct work_struct *work)
+{
+       struct random_work *rw = container_of(work, struct random_work,
+                                             rw_work);
+
+       get_random_bytes(rw->rw_buf, rw->rw_len);
+       rw->rw_cb(rw->rw_buf, rw->rw_len, rw->rw_private);
+       kfree(rw);
+}
+
+/*
+ * Equivalent to get_random_bytes, except that the request is deferred until
+ * the nonblocking_pool is initialized and then completed via the callback.
+ */
+int get_blocking_random_bytes_cb(void *buf, int nbytes, void *private,
+                                void (*cb)(void *buf, int buflen,
+                                           void *private))
+{
+       struct random_work *rw = NULL;
+       int ret = 0;
+
+       mutex_lock(&random_wait_list_mutex);
+       list_for_each_entry(rw, &random_wait_list, list)
+               if (buf == rw->rw_buf)
+                       goto out;
+
+       rw = kmalloc(sizeof(struct random_work), GFP_KERNEL);
+       if (!rw) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       INIT_WORK(&rw->rw_work, get_blocking_random_bytes_work);
+       rw->rw_buf = buf;
+       rw->rw_len = nbytes;
+       rw->rw_private = private;
+       rw->rw_cb = cb;
+       list_add_tail(&rw->list, &random_wait_list);
+
+out:
+       mutex_unlock(&random_wait_list_mutex);
+       if (nonblocking_pool.initialized)
+               process_random_waiters();
+
+       return ret;
+}
+EXPORT_SYMBOL(get_blocking_random_bytes_cb);
diff --git a/include/linux/random.h b/include/linux/random.h
index b05856e..b57525f 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -15,6 +15,9 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
+extern int get_blocking_random_bytes_cb(void *buf, int nbytes, void *private,
+                                       void (*cb)(void *buf, int buflen,
+                                                  void *private));
 void generate_random_uuid(unsigned char uuid_out[16]);
 extern int random_int_secret_init(void);
 
-- 
2.1.0
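
For illustration only (not part of the patch): a hypothetical caller hands over
a buffer, a length, a private pointer and a callback; the callback is invoked
from a work item once the nonblocking_pool is initialized and the buffer has
been filled via get_random_bytes(). The names my_ctx, my_seed_done and
my_request_seed below are made up for this sketch.

#include <linux/completion.h>
#include <linux/random.h>
#include <linux/types.h>

struct my_ctx {
        struct completion seeded;
        u8 seed[32];
};

/* runs in process context from the work item scheduled by random.c */
static void my_seed_done(void *buf, int buflen, void *private)
{
        struct my_ctx *ctx = private;

        /* buf points to ctx->seed and buflen == sizeof(ctx->seed) */
        complete(&ctx->seeded);
}

static int my_request_seed(struct my_ctx *ctx)
{
        init_completion(&ctx->seeded);
        /* 0 on success, -ENOMEM if the request cannot be queued */
        return get_blocking_random_bytes_cb(ctx->seed, sizeof(ctx->seed),
                                            ctx, my_seed_done);
}

The caller can then wait_for_completion(&ctx->seeded) from a context where
blocking is acceptable, or do its follow-up work directly in the callback.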



-- 
Ciao
Stephan