Enforce that ipc_rcu_alloc returns a cacheline aligned pointer on SMP.

Rationale: The SysV sem code tries to move the main spinlock into a separate cacheline (____cacheline_aligned_in_smp). This works only if ipc_rcu_alloc returns cacheline aligned pointers. vmalloc and kmalloc return cacheline aligned pointers, but the current implementation of ipc_rcu_alloc breaks that guarantee.
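For illustration only (this sketch is not part of the patch; the 64-byte cacheline and the stand-in header layouts are assumptions): the userspace program below mimics what happens when a small allocation header is prepended to a cacheline aligned block. With an unpadded header the payload starts sizeof(header) bytes into the cacheline, so a ____cacheline_aligned_in_smp member inside the payload ends up misaligned in memory; padding the header to a full cacheline, as the patch does for struct ipc_rcu, puts the payload back on a cacheline boundary.

/*
 * Userspace sketch, not kernel code.  aligned_alloc() stands in for
 * kmalloc(), which also returns cacheline aligned memory here, and the
 * struct members only approximate struct ipc_rcu.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define CACHELINE 64	/* assumed L1 cacheline size */

struct hdr_old {	/* stand-in for the old, unpadded struct ipc_rcu */
	void *rcu_func;
	void *rcu_next;
	int refcount;
};

struct hdr_new {	/* stand-in for the patched, cacheline padded header */
	void *rcu_func;
	void *rcu_next;
	int refcount;
} __attribute__((aligned(CACHELINE)));

int main(void)
{
	char *blk = aligned_alloc(CACHELINE, 4 * CACHELINE);

	if (!blk)
		return 1;

	/* Payload starts right after the header in both cases. */
	uintptr_t payload_old = (uintptr_t)(blk + sizeof(struct hdr_old));
	uintptr_t payload_new = (uintptr_t)(blk + sizeof(struct hdr_new));

	printf("old header: payload offset within cacheline = %lu\n",
	       (unsigned long)(payload_old % CACHELINE));	/* nonzero */
	printf("new header: payload offset within cacheline = %lu\n",
	       (unsigned long)(payload_new % CACHELINE));	/* 0 */

	free(blk);
	return 0;
}

With the old header the payload lands sizeof(struct hdr_old) bytes into the cacheline, so the compiler-requested alignment of members inside the payload no longer holds in memory; with the padded header the offset is 0 again.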
Andrew: Could you merge it into -akpm and then forward it towards Linus' tree?

Signed-off-by: Manfred Spraul <manf...@colorfullife.com>
---
 ipc/util.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/ipc/util.c b/ipc/util.c
index 809ec5e..9623c8e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -469,9 +469,7 @@ void ipc_free(void* ptr, int size)
 struct ipc_rcu {
 	struct rcu_head rcu;
 	atomic_t refcount;
-	/* "void *" makes sure alignment of following data is sane. */
-	void *data[0];
-};
+} ____cacheline_aligned_in_smp;
 
 /**
  * ipc_rcu_alloc - allocate ipc and rcu space
@@ -489,12 +487,14 @@ void *ipc_rcu_alloc(int size)
 	if (unlikely(!out))
 		return NULL;
 	atomic_set(&out->refcount, 1);
-	return out->data;
+	return out+1;
 }
 
 int ipc_rcu_getref(void *ptr)
 {
-	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu, data)->refcount);
+	struct ipc_rcu *p = ((struct ipc_rcu*)ptr)-1;
+
+	return atomic_inc_not_zero(&p->refcount);
 }
 
 /**
@@ -508,7 +508,7 @@ static void ipc_schedule_free(struct rcu_head *head)
 
 void ipc_rcu_putref(void *ptr)
 {
-	struct ipc_rcu *p = container_of(ptr, struct ipc_rcu, data);
+	struct ipc_rcu *p = ((struct ipc_rcu*)ptr)-1;
 
 	if (!atomic_dec_and_test(&p->refcount))
 		return;
-- 
1.8.1.4