In order to be completely generic, we have to double-check the read seqlock after acquiring a reference to the fence. If the driver is allocating fences from a SLAB_DESTROY_BY_RCU slab, or a similar freelist, then within an RCU grace period a fence may be freed and reallocated. The RCU read-side critical section does not prevent this reallocation; instead we have to inspect the reservation's seqlock to double-check whether the fences were reassigned while we were acquiring our reference. Here it is the combination of the write barrier in the seqlock and the full barrier after a successful fence_get_rcu() that ensures that, as long as the seqlock is stable, we hold the right references to the fences.
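
For reference, the acquire-then-recheck pattern reduced to its essentials (an illustrative sketch only, limited to the exclusive fence; get_excl_fence_sketch() is a made-up name and not part of this patch):

  /*
   * Sketch of the pattern: take the reference first, then confirm the
   * seqlock has not moved before trusting what we dereferenced.
   */
  static struct fence *get_excl_fence_sketch(struct reservation_object *obj)
  {
  	struct fence *fence;
  	unsigned int seq;

  retry:
  	rcu_read_lock();
  	seq = read_seqcount_begin(&obj->seq);

  	fence = rcu_dereference(obj->fence_excl);
  	if (fence && !fence_get_rcu(fence)) {
  		/* refcount already hit zero, the fence is being recycled */
  		rcu_read_unlock();
  		goto retry;
  	}

  	/*
  	 * Holding a reference is not enough with SLAB_DESTROY_BY_RCU: the
  	 * fence may have been freed and reused while we were looking, so
  	 * confirm the seqlock did not advance underneath us.
  	 */
  	if (read_seqcount_retry(&obj->seq, seq)) {
  		if (fence)
  			fence_put(fence);
  		rcu_read_unlock();
  		goto retry;
  	}

  	rcu_read_unlock();
  	return fence;
  }

The patch below applies the same recheck to the shared fence array as well, dropping every reference taken so far and restarting whenever the seqcount moves.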
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
---
 drivers/dma-buf/reservation.c | 73 +++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 41 deletions(-)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62ad8e3..8b1dd90b1a4e 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -280,18 +280,24 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      unsigned *pshared_count,
 				      struct fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct fence **shared = NULL;
+	struct fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
-		unsigned seq;
+		unsigned int seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
+
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				fence_put(shared[i]);
+			fence_put(fence_excl);
+			goto unlock;
+		}
 
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
-- 
2.8.1