After the recent introduction of many new coroutine callbacks, a couple
of calls from non-coroutine_fn code into coroutine_fns have sneaked in;
fix them by marking the callers as coroutine_fn as well.
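
For context, the rule being enforced here: a coroutine_fn may only be
called from coroutine context, so any function that calls one must be
marked coroutine_fn itself. Below is a minimal standalone sketch of the
pattern (do_io_co, helper and helper_co are made-up names, and
coroutine_fn is stubbed out so the snippet compiles outside the QEMU
tree, where the marker is enforced by static analysis rather than by
the compiler):

/* Stub of QEMU's coroutine_fn marker, for illustration only. */
#ifndef coroutine_fn
#define coroutine_fn
#endif

/* Hypothetical helper standing in for a real block-layer coroutine
 * function; assume it may yield while waiting for I/O. */
static int coroutine_fn do_io_co(void *opaque)
{
    (void)opaque;
    return 0;
}

/* Wrong: calls a coroutine_fn but is not marked, so nothing documents
 * (or lets tools verify) that it must never run outside a coroutine. */
static int helper(void *opaque)
{
    return do_io_co(opaque);
}

/* Right: the caller carries the marker too, propagating the
 * constraint up the call chain. */
static int coroutine_fn helper_co(void *opaque)
{
    return do_io_co(opaque);
}

This is the situation in the patch below: graph_lockable_auto_lock()
and graph_lockable_auto_unlock() call bdrv_graph_co_rdlock() and
bdrv_graph_co_rdunlock(), and mirror_cow_align() is only reached from
coroutine context in the mirror job, hence the added markers.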
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 block/mirror.c             | 4 ++--
 include/block/graph-lock.h | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
index af9bbd23d4cf..80fa345071fe 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -270,8 +270,8 @@ static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
 
 /* Round offset and/or bytes to target cluster if COW is needed, and
  * return the offset of the adjusted tail against original. */
-static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
-                            uint64_t *bytes)
+static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
+                                         uint64_t *bytes)
 {
     bool need_cow;
     int ret = 0;
diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h
index 18cc14de22fa..ac0fef860581 100644
--- a/include/block/graph-lock.h
+++ b/include/block/graph-lock.h
@@ -208,14 +208,14 @@ typedef struct GraphLockable { } GraphLockable;
  * unlocked. TSA_ASSERT() makes sure that the following calls know that we
  * hold the lock while unlocking is left unchecked.
  */
-static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA
+static inline GraphLockable * TSA_ASSERT(graph_lock) TSA_NO_TSA coroutine_fn
 graph_lockable_auto_lock(GraphLockable *x)
 {
     bdrv_graph_co_rdlock();
     return x;
 }
 
-static inline void TSA_NO_TSA
+static inline void TSA_NO_TSA coroutine_fn
 graph_lockable_auto_unlock(GraphLockable *x)
 {
     bdrv_graph_co_rdunlock();
-- 
2.39.2