Date: Tue, 12 Jan 2016 11:25:56 +0300
From: Vladimir Sementsov-Ogievskiy
Subject: Re: [Qemu-devel] [PATCH 06/13] HBitmap: Introduce "meta" bitmap to track bit changes
To: John Snow, Fam Zheng, qemu-devel@nongnu.org
Cc: Kevin Wolf, Jeff Cody, qemu-block@nongnu.org

On 11.01.2016 21:56, John Snow wrote:
>
> On 01/11/2016 10:40 AM, Vladimir Sementsov-Ogievskiy wrote:
>> On 04.01.2016 13:27, Fam Zheng wrote:
>>> Upon each bit toggle, the corresponding bit in the meta bitmap will be
>>> set.
>>>
>>> Signed-off-by: Fam Zheng <famz@redhat.com>
>>> ---
>>>   include/qemu/hbitmap.h |  8 ++++++++
>>>   util/hbitmap.c         | 61 +++++++++++++++++++++++++++++++++++++-------------
>>>   2 files changed, 54 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
>>> index bb94a00..ed672e7 100644
>>> --- a/include/qemu/hbitmap.h
>>> +++ b/include/qemu/hbitmap.h
>>> @@ -181,6 +181,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
>>>    */
>>>   unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
>>>
>>> +/* hbitmap_create_meta
>>> + * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
>>> + *
>>> + * @hb: The HBitmap to operate on.
>>> + * @chunk_size: How many bits in @hb does one bit in the meta track.
>>> + */
>>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size);
>>> +
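To make the new API concrete, here is a minimal usage sketch (the
function name and the numbers are mine, purely illustrative; it assumes
this patch is applied and the file is built inside the QEMU tree):

    #include "qemu/osdep.h"
    #include "qemu/hbitmap.h"

    static void meta_usage_sketch(void)
    {
        /* 1024 bits at granularity 0; one meta bit per 8 tracked bits */
        HBitmap *hb = hbitmap_alloc(1024, 0);
        HBitmap *meta = hbitmap_create_meta(hb, 8);

        hbitmap_set(hb, 10, 2);           /* dirty bits 10..11 */
        assert(hbitmap_get(meta, 10));    /* their chunk is marked...    */
        assert(hbitmap_get(meta, 8));     /* ...for any address in 8..15 */
        assert(!hbitmap_get(meta, 16));   /* untouched chunks stay clean */

        hbitmap_reset(meta, 0, 1024);     /* consumer clears meta when done */
        hbitmap_free(hb);                 /* also frees meta in this version
                                           * (see the hbitmap_free() hunk
                                           * further down) */
    }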
>>>   /**
>>>    * hbitmap_iter_next:
>>>    * @hbi: HBitmapIter to operate on.
>>> diff --git a/util/hbitmap.c b/util/hbitmap.c
>>> index 50b888f..55d3182 100644
>>> --- a/util/hbitmap.c
>>> +++ b/util/hbitmap.c
>>> @@ -81,6 +81,9 @@ struct HBitmap {
>>>        */
>>>       int granularity;
>>>
>>> +    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
>>> +    HBitmap *meta;
>>> +
>>>       /* A number of progressively less coarse bitmaps (i.e. level 0 is the
>>>        * coarsest). Each bit in level N represents a word in level N+1 that
>>>        * has a set bit, except the last level where each bit represents the
>>> @@ -212,25 +215,27 @@ static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
>>>   }
>>>
>>>   /* Setting starts at the last layer and propagates up if an element
>>> - * changes from zero to non-zero.
>>> + * changes.
>>>    */
>>>   static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
>>>   {
>>>       unsigned long mask;
>>> -    bool changed;
>>> +    unsigned long old;
>>>
>>>       assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
>>>       assert(start <= last);
>>>       mask = 2UL << (last & (BITS_PER_LONG - 1));
>>>       mask -= 1UL << (start & (BITS_PER_LONG - 1));
>>> -    changed = (*elem == 0);
>>> +    old = *elem;
>>>       *elem |= mask;
>>> -    return changed;
>>> +    return old != *elem;
>>>   }
>>>
>>> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
>>> -static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
>>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>>> + * Returns true if at least one bit is changed. */
>>> +static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
>>> +                           uint64_t last)
>>>   {
>>>       size_t pos = start >> BITS_PER_LEVEL;
>>>       size_t lastpos = last >> BITS_PER_LEVEL;
>>> @@ -259,22 +264,27 @@ static void hb_set_between(HBitmap *hb, int level, uint64_t start, uint64_t last
>>>       if (level > 0 && changed) {
>>>           hb_set_between(hb, level - 1, pos, lastpos);
>>>       }
>>> +    return changed;
>>>   }
>>>
>>>   void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
>>>   {
>>>       /* Compute range in the last layer. */
>>> +    uint64_t first, n;
>>>       uint64_t last = start + count - 1;
>>>
>>>       trace_hbitmap_set(hb, start, count,
>>>                         start >> hb->granularity, last >> hb->granularity);
>>>
>>> -    start >>= hb->granularity;
>>> +    first = start >> hb->granularity;
>>>       last >>= hb->granularity;
>>> -    count = last - start + 1;
>>> +    n = last - first + 1;
>>>
>>> -    hb->count += count - hb_count_between(hb, start, last);
>>> -    hb_set_between(hb, HBITMAP_LEVELS - 1, start, last);
>>> +    hb->count += n - hb_count_between(hb, first, last);
>>> +    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>>> +        hb->meta) {
>> I don't know what the optimizer thinks about it, but definitely
>>
>> +    if (hb->meta &&
>> +        hb_set_between(hb, HBITMAP_LEVELS - 1, first, last))
>>
>> should work faster in most cases, when hb->meta == NULL.
>>
>>
> The hb_set_between is first to ensure it always happens.

Oh, right. IMHO it would then be better to add "bool changed =
hb_set_between(...)" and test "if (changed && hb->meta)", but it's up
to you.
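For what it's worth, the evaluation-order point can be demonstrated
with a tiny standalone program (plain C, nothing QEMU-specific;
set_bits() and meta merely stand in for hb_set_between() and hb->meta):

    #include <stdbool.h>
    #include <stdio.h>

    static int calls;

    static bool set_bits(void)
    {
        calls++;           /* the side effect that must always happen */
        return true;       /* "at least one bit changed" */
    }

    int main(void)
    {
        void *meta = NULL;             /* the common case: no meta bitmap */

        if (set_bits() && meta) {      /* patch's order: call always runs */
            /* update meta */
        }
        if (meta && set_bits()) {      /* "faster" order: call is skipped! */
            /* update meta */
        }
        bool changed = set_bits();     /* suggested refactor: the call is
                                        * explicit and unconditional */
        if (changed && meta) {
            /* update meta */
        }
        printf("set_bits() ran %d times\n", calls);   /* prints 2, not 3 */
        return 0;
    }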
>
>>> +        hbitmap_set(hb->meta, start, count);
>>> +    }
>>>   }
>>>
>>>   /* Resetting works the other way round: propagate up if the new
>>> @@ -295,8 +305,10 @@ static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t l
>>>       return blanked;
>>>   }
>>>
>>> -/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)... */
>>> -static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t last)
>>> +/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
>>> + * Returns true if at least one bit is changed. */
>>> +static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
>>> +                             uint64_t last)
>>>   {
>>>       size_t pos = start >> BITS_PER_LEVEL;
>>>       size_t lastpos = last >> BITS_PER_LEVEL;
>>> @@ -339,21 +351,28 @@ static void hb_reset_between(HBitmap *hb, int level, uint64_t start, uint64_t la
>>>       if (level > 0 && changed) {
>>>           hb_reset_between(hb, level - 1, pos, lastpos);
>>>       }
>>> +
>>> +    return changed;
>>> +
>>>   }
>>>
>>>   void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
>>>   {
>>>       /* Compute range in the last layer. */
>>> +    uint64_t first;
>>>       uint64_t last = start + count - 1;
>>>
>>>       trace_hbitmap_reset(hb, start, count,
>>>                           start >> hb->granularity, last >> hb->granularity);
>>>
>>> -    start >>= hb->granularity;
>>> +    first = start >> hb->granularity;
>>>       last >>= hb->granularity;
>>>
>>> -    hb->count -= hb_count_between(hb, start, last);
>>> -    hb_reset_between(hb, HBITMAP_LEVELS - 1, start, last);
>>> +    hb->count -= hb_count_between(hb, first, last);
>>> +    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
>>> +        hb->meta) {
>> And the same suggestion here.
>>
>>> +        hbitmap_set(hb->meta, start, count);
>>> +    }
>>>   }
>>>
>>>   void hbitmap_reset_all(HBitmap *hb)
>>> @@ -384,6 +403,9 @@ void hbitmap_free(HBitmap *hb)
>>>       for (i = HBITMAP_LEVELS; i-- > 0; ) {
>>>           g_free(hb->levels[i]);
>>>       }
>>> +    if (hb->meta) {
>>> +        hbitmap_free(hb->meta);
>>> +    }
>> Hmm, this is not obvious to me... why not "the one who creates must
>> then destroy"?
>>
>>>       g_free(hb);
>>>   }
>>> @@ -493,3 +515,12 @@ bool hbitmap_merge(HBitmap *a, const HBitmap *b)
>>>       return true;
>>>   }
>>> +
>>> +HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
>>> +{
>>> +    assert(!(chunk_size & (chunk_size - 1)));
>>> +    assert(!hb->meta);
>>> +    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
>>> +                             hb->granularity + ctz32(chunk_size));
>>> +    return hb->meta;
>>> +}

-- 
Best regards,
Vladimir
* Now @virtuozzo.com instead of @parallels.com. Sorry for the
inconvenience.
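For reference, the size/granularity arithmetic of hbitmap_create_meta()
spelled out on illustrative numbers, as a standalone sketch (ctz32()
here stands in for QEMU's helper of the same name; the bitmap
parameters are made up for the example):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int ctz32(uint32_t v)
    {
        return __builtin_ctz(v);      /* GCC/Clang builtin */
    }

    int main(void)
    {
        uint64_t nbits       = 4096;  /* size given to hbitmap_alloc() */
        int      granularity = 2;     /* 1 tracked bit per 4 addresses */
        int      chunk_size  = 16;    /* 1 meta bit per 16 tracked bits */

        /* the first assert in hbitmap_create_meta(): power of two only */
        assert(!(chunk_size & (chunk_size - 1)));

        /* hb->size is the number of last-level bits, rounded up */
        uint64_t hb_size = (nbits + (1ULL << granularity) - 1) >> granularity;

        /* the meta bitmap spans the same address space, but coarser:
         * granularity + ctz32(chunk_size) = 2 + 4 = 6 */
        int meta_gran = granularity + ctz32(chunk_size);

        printf("meta covers %" PRIu64 " addresses, 1 meta bit per %d "
               "addresses (= %d tracked bits)\n",
               hb_size << granularity, 1 << meta_gran,
               (1 << meta_gran) >> granularity);
        /* -> meta covers 4096 addresses, 1 meta bit per 64 addresses
         *    (= 16 tracked bits): exactly one chunk, as intended */
        return 0;
    }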