From: ChenLiang <chenlian...@huawei.com> xbzrle_encode_buffer reads the same bytes of VM RAM more than once. This is risky when xbzrle_encode_buffer runs on data that may be changing concurrently.
Signed-off-by: ChenLiang <chenlian...@huawei.com> Signed-off-by: Gonglei <arei.gong...@huawei.com> Reviewed-by: Dr. David Alan Gilbert <dgilb...@redhat.com> --- xbzrle.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/xbzrle.c b/xbzrle.c index 8e220bf..d27a140 100644 --- a/xbzrle.c +++ b/xbzrle.c @@ -27,7 +27,7 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, uint8_t *dst, int dlen) { uint32_t zrun_len = 0, nzrun_len = 0; - int d = 0, i = 0; + int d = 0, i = 0, j; long res; uint8_t *nzrun_start = NULL; @@ -82,6 +82,8 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, if (d + 2 > dlen) { return -1; } + i++; + nzrun_len++; /* not aligned to sizeof(long) */ res = (slen - i) % sizeof(long); while (res && old_buf[i] != new_buf[i]) { @@ -96,14 +98,18 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, unsigned long mask = (unsigned long)0x0101010101010101ULL; while (i < slen) { unsigned long xor; + uint8_t *xor_ptr = (uint8_t *)(&xor); xor = *(unsigned long *)(old_buf + i) ^ *(unsigned long *)(new_buf + i); if ((xor - mask) & ~xor & (mask << 7)) { /* found the end of an nzrun within the current long */ - while (old_buf[i] != new_buf[i]) { - nzrun_len++; - i++; + for (j = 0; j < sizeof(long); j++) { + if (0 == xor_ptr[j]) { + break; + } } + i += j; + nzrun_len += j; break; } else { i += sizeof(long); @@ -120,6 +126,8 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, memcpy(dst + d, nzrun_start, nzrun_len); d += nzrun_len; nzrun_len = 0; + i++; + zrun_len++; } return d; -- 1.7.12.4