Am 29.08.2016 um 19:10 hat Pavel Butsykin geschrieben: > For the case when the cache partially covers the request, part of the request > is filled from the cache, and the other part of the request is read from disk. Also add > reference counting for nodes, as a way to maintain multithreading. > > There is still no full synchronization in multithreaded mode. > > Signed-off-by: Pavel Butsykin <pbutsy...@virtuozzo.com> > --- > block/pcache.c | 169 > ++++++++++++++++++++++++++++++++++++++++++++++++++++----- > 1 file changed, 155 insertions(+), 14 deletions(-) > > diff --git a/block/pcache.c b/block/pcache.c > index 28bd056..6114289 100644 > --- a/block/pcache.c > +++ b/block/pcache.c > @@ -58,7 +58,10 @@ typedef struct BlockNode { > typedef struct PCNode { > BlockNode cm; > > + uint32_t status;
I guess this is NODE_*_STATUS. Make it a named enum then instead of uint32_t so that it's obvious what this field means. > + uint32_t ref; > uint8_t *data; > + CoMutex lock; > } PCNode; > > typedef struct ReqStor { > @@ -95,9 +98,23 @@ typedef struct PrefCacheAIOCB { > uint64_t sector_num; > uint32_t nb_sectors; > int aio_type; > + struct { > + QTAILQ_HEAD(req_head, PrefCachePartReq) list; > + CoMutex lock; > + } requests; > int ret; > } PrefCacheAIOCB; > > +typedef struct PrefCachePartReq { > + uint64_t sector_num; > + uint32_t nb_sectors; Should be byte-based, like everything. > + QEMUIOVector qiov; > + PCNode *node; > + PrefCacheAIOCB *acb; > + QTAILQ_ENTRY(PrefCachePartReq) entry; > +} PrefCachePartReq; > + > static const AIOCBInfo pcache_aiocb_info = { > .aiocb_size = sizeof(PrefCacheAIOCB), > }; > @@ -126,8 +143,39 @@ static QemuOptsList runtime_opts = { > #define MB_BITS 20 > #define PCACHE_DEFAULT_CACHE_SIZE (4 << MB_BITS) > > +enum { > + NODE_SUCCESS_STATUS = 0, > + NODE_WAIT_STATUS = 1, > + NODE_REMOVE_STATUS = 2, > + NODE_GHOST_STATUS = 3 /* only for debugging */ NODE_DELETED_STATUS? > +}; > + > #define PCNODE(_n) ((PCNode *)(_n)) > > +static inline void pcache_node_unref(PCNode *node) > +{ > + assert(node->status == NODE_SUCCESS_STATUS || > + node->status == NODE_REMOVE_STATUS); > + > + if (atomic_fetch_dec(&node->ref) == 0) { Atomics imply concurrency, which we don't have. > + assert(node->status == NODE_REMOVE_STATUS); > + > + node->status = NODE_GHOST_STATUS; > + g_free(node->data); > + g_slice_free1(sizeof(*node), node); When you switch to plain g_malloc(), this needs to be updated. > + } > +} > + > +static inline PCNode *pcache_node_ref(PCNode *node) > +{ > + assert(node->status == NODE_SUCCESS_STATUS || > + node->status == NODE_WAIT_STATUS); > + assert(atomic_read(&node->ref) == 0);/* XXX: only for sequential > requests */ > + atomic_inc(&node->ref); Do you expect concurrent accesses or not? 
Because if you don't, there is no need for atomics, but if you do, this is buggy because each of the lines is atomic by itself, but the assertion isn't atomic with the refcount increment. A ref() function that can take only a single reference feels odd anyway and this restriction seems to be lifted later. Why have it here? > + > + return node; > +} Kevin