(These are pushed to github.com/hallyn/lxc, branch lxclock-flock.rebase)

Create three pairs of functions:
        int process_lock(void);
        void process_unlock(void);
        int container_mem_lock(struct lxc_container *c);
        void container_mem_unlock(struct lxc_container *c);
        int container_disk_lock(struct lxc_container *c);
        void container_disk_unlock(struct lxc_container *c);

and use those in lxccontainer.c

process_lock() protects process-wide state (such as the file descriptor
table) among multiple threads.  container_mem_lock() protects the
in-memory struct lxc_container among multiple threads.
container_disk_lock() protects the container as represented on disk;
it takes privlock and then slock.
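
For illustration, here is a minimal sketch (not part of this patch;
lxcapi_example is a made-up function) of the usage pattern the wrappers
are meant to give lxccontainer.c:

        static bool lxcapi_example(struct lxc_container *c)
        {
                bool ret = false;

                if (!c)
                        return false;
                /* serialize access to the in-memory struct among threads */
                if (container_mem_lock(c))
                        return false;
                if (c->lxc_conf)
                        ret = true;
                container_mem_unlock(c);
                return ret;
        }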

Also remove the lock in lxcapi_init_pid() as Dwight suggested.

Fix a typo (s/container/contain/) spotted by Dwight.

More locking fixes are needed, but let's first get the fundamentals
right.  How close does this get us?

Signed-off-by: Serge Hallyn <serge.hal...@ubuntu.com>
---
 src/lxc/lxccontainer.c | 128 +++++++++++++++++--------------------------------
 src/lxc/lxclock.c      |  91 ++++++++++++++++++++++++-----------
 src/lxc/lxclock.h      |   9 +++-
 3 files changed, 113 insertions(+), 115 deletions(-)

diff --git a/src/lxc/lxccontainer.c b/src/lxc/lxccontainer.c
index b2de0e6..adb1c7a 100644
--- a/src/lxc/lxccontainer.c
+++ b/src/lxc/lxccontainer.c
@@ -42,8 +42,6 @@
 #include <arpa/inet.h>
 #include <ifaddrs.h>
 
-extern pthread_mutex_t thread_mutex;
-
 /* Define unshare() if missing from the C library */
 /* this is also in attach.c and lxccontainer.c: commonize it in utils.c */
 #ifndef HAVE_UNSHARE
@@ -153,7 +151,7 @@ int lxc_container_get(struct lxc_container *c)
        if (c->numthreads < 1)
                return 0;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return 0;
        if (c->numthreads < 1) {
                // bail without trying to unlock, bc the privlock is now probably
@@ -161,7 +159,7 @@ int lxc_container_get(struct lxc_container *c)
                return 0;
        }
        c->numthreads++;
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return 1;
 }
 
@@ -169,14 +167,14 @@ int lxc_container_put(struct lxc_container *c)
 {
        if (!c)
                return -1;
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return -1;
        if (--c->numthreads < 1) {
-               lxcunlock(c->privlock);
+               container_mem_unlock(c);
                lxc_container_free(c);
                return 1;
        }
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return 0;
 }
 
@@ -196,7 +194,7 @@ static bool lxcapi_is_defined(struct lxc_container *c)
        if (!c)
                return false;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return false;
        if (!c->configfile)
                goto out;
@@ -206,7 +204,7 @@ static bool lxcapi_is_defined(struct lxc_container *c)
        ret = true;
 
 out:
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return ret;
 }
 
@@ -217,15 +215,11 @@ static const char *lxcapi_state(struct lxc_container *c)
 
        if (!c)
                return NULL;
-       if (lxclock(c->privlock, 0))
+       if (container_disk_lock(c))
                return NULL;
-       if (lxclock(c->slock, 0))
-               goto bad;
        s = lxc_getstate(c->name, c->config_path);
        ret = lxc_state2str(s);
-       lxcunlock(c->slock);
-bad:
-       lxcunlock(c->privlock);
+       container_disk_unlock(c);
 
        return ret;
 }
@@ -255,15 +249,10 @@ static bool lxcapi_freeze(struct lxc_container *c)
        if (!c)
                return false;
 
-       if (lxclock(c->privlock, 0))
-               return false;
-       if (lxclock(c->slock, 0)) {
-               lxcunlock(c->privlock);
+       if (container_disk_lock(c))
                return false;
-       }
        ret = lxc_freeze(c->name, c->config_path);
-       lxcunlock(c->slock);
-       lxcunlock(c->privlock);
+       container_disk_unlock(c);
        if (ret)
                return false;
        return true;
@@ -275,15 +264,10 @@ static bool lxcapi_unfreeze(struct lxc_container *c)
        if (!c)
                return false;
 
-       if (lxclock(c->privlock, 0))
-               return false;
-       if (lxclock(c->slock, 0)) {
-               lxcunlock(c->privlock);
+       if (container_disk_lock(c))
                return false;
-       }
        ret = lxc_unfreeze(c->name, c->config_path);
-       lxcunlock(c->slock);
-       lxcunlock(c->privlock);
+       container_disk_unlock(c);
        if (ret)
                return false;
        return true;
@@ -295,16 +279,7 @@ static pid_t lxcapi_init_pid(struct lxc_container *c)
        if (!c)
                return -1;
 
-       if (lxclock(c->privlock, 0))
-               return -1;
-       if (lxclock(c->slock, 0)) {
-               lxcunlock(c->privlock);
-               return -1;
-       }
-       ret = lxc_cmd_get_init_pid(c->name, c->config_path);
-       lxcunlock(c->slock);
-       lxcunlock(c->privlock);
-       return ret;
+       return lxc_cmd_get_init_pid(c->name, c->config_path);
 }
 
 static bool load_config_locked(struct lxc_container *c, const char *fname)
@@ -328,15 +303,10 @@ static bool lxcapi_load_config(struct lxc_container *c, const char *alt_file)
                fname = alt_file;
        if (!fname)
                return false;
-       if (lxclock(c->privlock, 0))
-               return false;
-       if (lxclock(c->slock, 0)) {
-               lxcunlock(c->privlock);
+       if (container_disk_lock(c))
                return false;
-       }
        ret = load_config_locked(c, fname);
-       lxcunlock(c->slock);
-       lxcunlock(c->privlock);
+       container_disk_unlock(c);
        return ret;
 }
 
@@ -399,11 +369,11 @@ static bool lxcapi_start(struct lxc_container *c, int useinit, char * const argv
        if (useinit && !argv)
                return false;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return false;
        conf = c->lxc_conf;
        daemonize = c->daemonize;
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
 
        if (useinit) {
                ret = lxc_execute(c->name, argv, 1, conf, c->config_path);
@@ -424,23 +394,20 @@ static bool lxcapi_start(struct lxc_container *c, int useinit, char * const argv
                        return false;
                lxc_monitord_spawn(c->config_path);
 
-               ret = pthread_mutex_lock(&thread_mutex);
-               if (ret != 0) {
-                       ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
+               if (process_lock())
                        return false;
-               }
                pid_t pid = fork();
                if (pid < 0) {
                        lxc_container_put(c);
-                       pthread_mutex_unlock(&thread_mutex);
+                       process_unlock();
                        return false;
                }
                if (pid != 0) {
                        ret = wait_on_daemonized_start(c);
-                       pthread_mutex_unlock(&thread_mutex);
+                       process_unlock();
                        return ret;
                }
-               pthread_mutex_unlock(&thread_mutex);
+               process_unlock();
                /* second fork to be reparented by init */
                pid = fork();
                if (pid < 0) {
@@ -628,13 +595,8 @@ static bool lxcapi_create(struct lxc_container *c, char *t, char *const argv[])
        /* we're going to fork.  but since we'll wait for our child, we
         * don't need to lxc_container_get */
 
-       if (lxclock(c->privlock, 0))
+       if (container_disk_lock(c))
                goto out;
-       if (lxclock(c->slock, 0)) {
-               ERROR("failed to grab global container lock for %s\n", c->name);
-               lxcunlock(c->privlock);
-               goto out_unlock1;
-       }
 
        pid = fork();
        if (pid < 0) {
@@ -713,9 +675,7 @@ static bool lxcapi_create(struct lxc_container *c, char *t, char *const argv[])
        bret = load_config_locked(c, c->configfile);
 
 out_unlock:
-       lxcunlock(c->slock);
-out_unlock1:
-       lxcunlock(c->privlock);
+       container_disk_unlock(c);
 out:
        if (tpath)
                free(tpath);
@@ -793,11 +753,10 @@ static bool lxcapi_clear_config_item(struct lxc_container *c, const char *key)
 
        if (!c || !c->lxc_conf)
                return false;
-       if (lxclock(c->privlock, 0)) {
+       if (container_mem_lock(c))
                return false;
-       }
        ret = lxc_clear_config_item(c->lxc_conf, key);
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return ret == 0;
 }
 
@@ -919,11 +878,10 @@ static int lxcapi_get_config_item(struct lxc_container *c, const char *key, char
 
        if (!c || !c->lxc_conf)
                return -1;
-       if (lxclock(c->privlock, 0)) {
+       if (container_mem_lock(c))
                return -1;
-       }
        ret = lxc_get_config_item(c->lxc_conf, key, retv, inlen);
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return ret;
 }
 
@@ -938,12 +896,12 @@ static int lxcapi_get_keys(struct lxc_container *c, const char *key, char *retv,
         */
        if (!c || !c->lxc_conf)
                return -1;
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return -1;
        int ret = -1;
        if (strncmp(key, "lxc.network.", 12) == 0)
                ret =  lxc_list_nicconfigs(c->lxc_conf, key, retv, inlen);
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return ret;
 }
 
@@ -968,13 +926,13 @@ static bool lxcapi_save_config(struct lxc_container *c, const char *alt_file)
        FILE *fout = fopen(alt_file, "w");
        if (!fout)
                return false;
-       if (lxclock(c->privlock, 0)) {
+       if (container_mem_lock(c)) {
                fclose(fout);
                return false;
        }
        write_config(fout, c->lxc_conf);
        fclose(fout);
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return true;
 }
 
@@ -1015,7 +973,7 @@ static bool lxcapi_set_config_item(struct lxc_container *c, const char *key, con
        if (!c)
                return false;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return false;
 
        if (!c->lxc_conf)
@@ -1030,7 +988,7 @@ static bool lxcapi_set_config_item(struct lxc_container *c, const char *key, con
                b = true;
 
 err:
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return b;
 }
 
@@ -1091,7 +1049,7 @@ static bool lxcapi_set_config_path(struct lxc_container *c, const char *path)
        if (!c)
                return b;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return b;
 
        p = strdup(path);
@@ -1117,7 +1075,7 @@ static bool lxcapi_set_config_path(struct lxc_container *c, const char *path)
 err:
        if (oldpath)
                free(oldpath);
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return b;
 }
 
@@ -1130,7 +1088,7 @@ static bool lxcapi_set_cgroup_item(struct lxc_container *c, const char *subsys,
        if (!c)
                return false;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return false;
 
        if (is_stopped_nolock(c))
@@ -1140,7 +1098,7 @@ static bool lxcapi_set_cgroup_item(struct lxc_container *c, const char *subsys,
        if (!ret)
                b = true;
 err:
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return b;
 }
 
@@ -1151,7 +1109,7 @@ static int lxcapi_get_cgroup_item(struct lxc_container *c, const char *subsys, c
        if (!c || !c->lxc_conf)
                return -1;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return -1;
 
        if (is_stopped_nolock(c))
@@ -1160,7 +1118,7 @@ static int lxcapi_get_cgroup_item(struct lxc_container *c, const char *subsys, c
        ret = lxc_cgroup_get(c->name, subsys, retv, inlen, c->config_path);
 
 out:
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return ret;
 }
 
@@ -1615,7 +1573,7 @@ struct lxc_container *lxcapi_clone(struct lxc_container *c, const char *newname,
        if (!c || !c->is_defined(c))
                return NULL;
 
-       if (lxclock(c->privlock, 0))
+       if (container_mem_lock(c))
                return NULL;
 
        if (c->is_running(c)) {
@@ -1697,11 +1655,11 @@ struct lxc_container *lxcapi_clone(struct lxc_container *c, const char *newname,
                goto out;
 
        // TODO: update c's lxc.snapshot = count
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        return c2;
 
 out:
-       lxcunlock(c->privlock);
+       container_mem_unlock(c);
        if (c2) {
                c2->destroy(c2);
                lxc_container_put(c2);
diff --git a/src/lxc/lxclock.c b/src/lxc/lxclock.c
index bddd1e9..629ce52 100644
--- a/src/lxc/lxclock.c
+++ b/src/lxc/lxclock.c
@@ -48,7 +48,10 @@ static char *lxclock_name(const char *p, const char *n)
                free(dest);
                return NULL;
        }
-       if (mkdir_p(dest, 0755) < 0) {
+       process_lock();
+       ret = mkdir_p(dest, 0755);
+       process_unlock();
+       if (ret < 0) {
                free(dest);
                return NULL;
        }
@@ -80,11 +83,6 @@ static sem_t *lxc_new_unnamed_sem(void)
 struct lxc_lock *lxc_newlock(const char *lxcpath, const char *name)
 {
        struct lxc_lock *l;
-       int ret = pthread_mutex_lock(&thread_mutex);
-       if (ret != 0) {
-               ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
-               return NULL;
-       }
 
        l = malloc(sizeof(*l));
        if (!l)
@@ -106,18 +104,12 @@ struct lxc_lock *lxc_newlock(const char *lxcpath, const char *name)
        l->u.f.fd = -1;
 
 out:
-       pthread_mutex_unlock(&thread_mutex);
        return l;
 }
 
 int lxclock(struct lxc_lock *l, int timeout)
 {
-       int saved_errno = errno;
-       int ret = pthread_mutex_lock(&thread_mutex);
-       if (ret != 0) {
-               ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
-               return ret;
-       }
+       int ret, saved_errno = errno;
 
        switch(l->type) {
        case LXC_LOCK_ANON_SEM:
@@ -149,35 +141,31 @@ int lxclock(struct lxc_lock *l, int timeout)
                        ret = -2;
                        goto out;
                }
+               process_lock();
                if (l->u.f.fd == -1) {
                        l->u.f.fd = open(l->u.f.fname, O_RDWR|O_CREAT,
                                        S_IWUSR | S_IRUSR);
                        if (l->u.f.fd == -1) {
+                               process_unlock();
                                ERROR("Error opening %s", l->u.f.fname);
                                goto out;
                        }
                }
                ret = flock(l->u.f.fd, LOCK_EX);
+               process_unlock();
                if (ret == -1)
                        saved_errno = errno;
                break;
        }
 
 out:
-       pthread_mutex_unlock(&thread_mutex);
        errno = saved_errno;
        return ret;
 }
 
 int lxcunlock(struct lxc_lock *l)
 {
-       int saved_errno = errno;
-       int ret = pthread_mutex_lock(&thread_mutex);
-
-       if (ret != 0) {
-               ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
-               return ret;
-       }
+       int ret = 0, saved_errno = errno;
 
        switch(l->type) {
        case LXC_LOCK_ANON_SEM:
@@ -188,6 +176,7 @@ int lxcunlock(struct lxc_lock *l)
                        saved_errno = errno;
                break;
        case LXC_LOCK_FLOCK:
+               process_lock();
                if (l->u.f.fd != -1) {
                        if ((ret = flock(l->u.f.fd, LOCK_UN)) < 0)
                                saved_errno = errno;
@@ -195,22 +184,23 @@ int lxcunlock(struct lxc_lock *l)
                        l->u.f.fd = -1;
                } else
                        ret = -2;
+               process_unlock();
                break;
        }
 
-       pthread_mutex_unlock(&thread_mutex);
        errno = saved_errno;
        return ret;
 }
 
+/*
+ * lxc_putlock() is only called when a container_new() fails,
+ * or during container_put(), which is already guaranteed to
+ * only be done by one task.
+ * So the only exclusion we need to provide here is for regular
+ * thread safety (i.e. file descriptor table changes).
+ */
 void lxc_putlock(struct lxc_lock *l)
 {
-       int ret = pthread_mutex_lock(&thread_mutex);
-       if (ret != 0) {
-               ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
-               return;
-       }
-
        if (!l)
-               goto out;
+               return;
        switch(l->type) {
@@ -219,16 +209,59 @@ void lxc_putlock(struct lxc_lock *l)
                        sem_close(l->u.sem);
                break;
        case LXC_LOCK_FLOCK:
+               process_lock();
                if (l->u.f.fd != -1) {
                        close(l->u.f.fd);
                        l->u.f.fd = -1;
                }
+               process_unlock();
                if (l->u.f.fname) {
                        free(l->u.f.fname);
                        l->u.f.fname = NULL;
                }
                break;
        }
-out:
+}
+
+int process_lock(void)
+{
+       int ret;
+       ret = pthread_mutex_lock(&thread_mutex);
+       if (ret != 0)
+               ERROR("pthread_mutex_lock returned:%d %s", ret, strerror(ret));
+       return ret;
+}
+
+void process_unlock(void)
+{
        pthread_mutex_unlock(&thread_mutex);
 }
+
+int container_mem_lock(struct lxc_container *c)
+{
+       return lxclock(c->privlock, 0);
+}
+
+void container_mem_unlock(struct lxc_container *c)
+{
+       lxcunlock(c->privlock);
+}
+
+int container_disk_lock(struct lxc_container *c)
+{
+       int ret;
+
+       if ((ret = lxclock(c->privlock, 0)))
+               return ret;
+       if ((ret = lxclock(c->slock, 0))) {
+               lxcunlock(c->privlock);
+               return ret;
+       }
+       return 0;
+}
+
+void container_disk_unlock(struct lxc_container *c)
+{
+       lxcunlock(c->slock);
+       lxcunlock(c->privlock);
+}
diff --git a/src/lxc/lxclock.h b/src/lxc/lxclock.h
index 61e7bbb..d03b688 100644
--- a/src/lxc/lxclock.h
+++ b/src/lxc/lxclock.h
@@ -53,7 +53,7 @@ struct lxc_lock {
  * We use that to protect the containers as represented on disk.
  * lxc_newlock() for the named lock only allocates the pathname in
  * memory so we can quickly open+lock it at lxclock.
- * l->u.f.fname will container the malloc'ed name (which must be
+ * l->u.f.fname will contain the malloc'ed name (which must be
  * freed when the container is freed), and u.f.fd = -1.
  *
  * return lxclock on success, NULL on failure.
@@ -81,3 +81,10 @@ extern int lxclock(struct lxc_lock *lock, int timeout);
 extern int lxcunlock(struct lxc_lock *lock);
 
 extern void lxc_putlock(struct lxc_lock *l);
+
+extern int process_lock(void);
+extern void process_unlock(void);
+extern int container_mem_lock(struct lxc_container *c);
+extern void container_mem_unlock(struct lxc_container *c);
+extern int container_disk_lock(struct lxc_container *c);
+extern void container_disk_unlock(struct lxc_container *c);
-- 
1.8.1.2

