From: Stanislav Kinsburskiy <skinsbur...@virtuozzo.com>

Use per-VE state in struct cn_private instead of static variables.

Signed-off-by: Stanislav Kinsburskiy <skinsbur...@virtuozzo.com>
Reviewed-by: Andrey Ryabinin <aryabi...@virtuozzo.com>

khorenko@: rebase to RHEL8.4 notes:
- proc_event_counts completely rewritten

+++
proc connector: protect from task cpu migration in send_msg_ve

this_cpu_ptr() must only be used under local_lock() (or with task
migration otherwise disabled), so switch the order: take the lock first,
then fetch the per-cpu pointer (a sketch follows the diffstat below).

Signed-off-by: Konstantin Khorenko <khore...@virtuozzo.com>
Signed-off-by: Alexander Mikhalitsyn <alexander.mikhalit...@virtuozzo.com>

(cherry picked from vz8 commit 3dcf53e526938e05eb88af5957984bda77b41819)
Signed-off-by: Pavel Tikhomirov <ptikhomi...@virtuozzo.com>
---
 drivers/connector/cn_proc.c | 67 ++++++++++++++++++++++++++-----------
 include/linux/connector.h   |  3 +-
 2 files changed, 49 insertions(+), 21 deletions(-)
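
Below is a minimal sketch of the pattern the changes above rely on; it is
illustrative only and not part of the applied diff. All names (demo_event,
demo_seq_init, demo_next_seq, demo_seq_exit) are hypothetical stand-ins for
ve->cn->local_event and its init/fini paths: allocate the per-cpu state
dynamically, initialise each CPU's local_lock_t, and always take local_lock()
before this_cpu_ptr(), because local_lock() keeps the task from migrating off
the current CPU.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu state, mirroring struct local_event. */
struct demo_event {
        local_lock_t lock;      /* kept first, see BUILD_BUG_ON() below */
        u32 count;
};

static struct demo_event __percpu *demo_event;

static int demo_seq_init(void)
{
        int cpu;

        demo_event = alloc_percpu(struct demo_event);
        if (!demo_event)
                return -ENOMEM;

        /* A dynamically allocated local_lock_t is initialised per CPU. */
        for_each_possible_cpu(cpu)
                local_lock_init(&per_cpu_ptr(demo_event, cpu)->lock);

        return 0;
}

static u32 demo_next_seq(void)
{
        u32 seq;

        /*
         * Taking this_cpu_ptr() before the lock would be wrong: the task
         * could migrate in between and then increment another CPU's
         * counter without holding that CPU's lock.
         */
        BUILD_BUG_ON(offsetof(struct demo_event, lock) != 0);
        local_lock(&demo_event->lock);          /* no migration from here on */
        seq = this_cpu_ptr(demo_event)->count++;
        local_unlock(&demo_event->lock);

        return seq;
}

static void demo_seq_exit(void)
{
        free_percpu(demo_event);
}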

diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index a73d074ef488..838aeae09fcd 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -37,7 +37,6 @@ static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
        return (struct cn_msg *)(buffer + 4);
 }
 
-static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
 static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
 
 /* local_event.count is used as the sequence number of the netlink message */
@@ -45,15 +44,21 @@ struct local_event {
        local_lock_t lock;
        __u32 count;
 };
-static DEFINE_PER_CPU(struct local_event, local_event) = {
-       .lock = INIT_LOCAL_LOCK(lock),
-};
 
-static inline void send_msg(struct cn_msg *msg)
+static inline void send_msg_ve(struct ve_struct *ve, struct cn_msg *msg)
 {
-       local_lock(&local_event.lock);
+       struct local_event *le_ptr;
+
+       /*
+        * The following hack with the local_event->lock address works only
+        * as long as "lock" is the first field in the local_event struct,
+        * so stay on the safe side.
+        */
+       BUILD_BUG_ON(offsetof(struct local_event, lock) != 0);
+       local_lock(&ve->cn->local_event->lock);
 
-       msg->seq = __this_cpu_inc_return(local_event.count) - 1;
+       le_ptr = this_cpu_ptr(ve->cn->local_event);
+       msg->seq = le_ptr->count++;
        ((struct proc_event *)msg->data)->cpu = smp_processor_id();
 
        /*
@@ -64,7 +69,7 @@ static inline void send_msg(struct cn_msg *msg)
         */
        cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
 
-       local_unlock(&local_event.lock);
+       local_unlock(&ve->cn->local_event->lock);
 }
 
 static struct cn_msg *cn_msg_fill(__u8 *buffer,
@@ -92,6 +97,13 @@ static struct cn_msg *cn_msg_fill(__u8 *buffer,
        return fill_event(ev, task, cookie) ? msg : NULL;
 }
 
+static int proc_event_num_listeners(struct ve_struct *ve)
+{
+       if (ve->cn)
+               return atomic_read(&ve->cn->proc_event_num_listeners);
+       return 0;
+}
+
 static void proc_event_connector(struct task_struct *task,
                                 int what, int cookie,
                                 bool (*fill_event)(struct proc_event *ev,
@@ -100,8 +112,9 @@ static void proc_event_connector(struct task_struct *task,
 {
        struct cn_msg *msg;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+       struct ve_struct *ve = task->task_ve;
 
-       if (atomic_read(&proc_event_num_listeners) < 1)
+       if (proc_event_num_listeners(ve) < 1)
                return;
 
        msg = cn_msg_fill(buffer, task, what, cookie, fill_event);
@@ -109,7 +122,7 @@ static void proc_event_connector(struct task_struct *task,
                return;
 
        /*  If cn_netlink_send() failed, the data is not sent */
-       send_msg(msg);
+       send_msg_ve(ve, msg);
 }
 
 static bool fill_fork_event(struct proc_event *ev, struct task_struct *task,
@@ -284,13 +297,13 @@ void proc_exit_connector(struct task_struct *task)
  * values because it's not being returned via syscall return
  * mechanisms.
  */
-static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+static void cn_proc_ack(struct ve_struct *ve, int err, int rcvd_seq, int rcvd_ack)
 {
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
 
-       if (atomic_read(&proc_event_num_listeners) < 1)
+       if (proc_event_num_listeners(ve) < 1)
                return;
 
        msg = buffer_to_cn_msg(buffer);
@@ -305,7 +318,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       send_msg(msg);
+       send_msg_ve(ve, msg);
 }
 
 /**
@@ -316,6 +329,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                              struct netlink_skb_parms *nsp)
 {
        enum proc_cn_mcast_op *mc_op = NULL;
+       struct ve_struct *ve = get_exec_env();
        int err = 0;
 
        if (msg->len != sizeof(*mc_op))
@@ -339,10 +353,10 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
-               atomic_inc(&proc_event_num_listeners);
+               atomic_inc(&ve->cn->proc_event_num_listeners);
                break;
        case PROC_CN_MCAST_IGNORE:
-               atomic_dec(&proc_event_num_listeners);
+               atomic_dec(&ve->cn->proc_event_num_listeners);
                break;
        default:
                err = EINVAL;
@@ -350,22 +364,37 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
        }
 
 out:
-       cn_proc_ack(err, msg->seq, msg->ack);
+       cn_proc_ack(ve, err, msg->seq, msg->ack);
 }
 
 int cn_proc_init_ve(struct ve_struct *ve)
 {
-       int err = cn_add_callback_ve(ve, &cn_proc_event_id,
-                                    "cn_proc",
-                                    &cn_proc_mcast_ctl);
+       int err, cpu;
+       struct local_event *le_ptr;
+
+       ve->cn->local_event = alloc_percpu(struct local_event);
+       if (!ve->cn->local_event)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               le_ptr = per_cpu_ptr(ve->cn->local_event, cpu);
+               local_lock_init(&le_ptr->lock);
+       }
+
+       err = cn_add_callback_ve(ve, &cn_proc_event_id,
+                                 "cn_proc",
+                                 &cn_proc_mcast_ctl);
        if (err) {
                pr_warn("VE#%d: cn_proc failed to register\n", ve->veid);
+               free_percpu(ve->cn->local_event);
                return err;
        }
+       atomic_set(&ve->cn->proc_event_num_listeners, 0);
        return 0;
 }
 
 void cn_proc_fini_ve(struct ve_struct *ve)
 {
        cn_del_callback_ve(ve, &cn_proc_event_id);
+       free_percpu(ve->cn->local_event);
 }
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 23ba9dd97571..fa270a61e577 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -59,8 +59,7 @@ struct cn_private {
        int             cn_already_initialized;
 
        atomic_t        proc_event_num_listeners;
-       u32 __percpu    *proc_event_counts;
-
+       struct local_event __percpu *local_event;
 };
 
 struct ve_struct;
-- 
2.31.1
