public inbox for kexec@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
@ 2012-12-25  8:31 Zhang Yanfei
  0 siblings, 0 replies; 5+ messages in thread
From: Zhang Yanfei @ 2012-12-25  8:31 UTC (permalink / raw)
  To: horms; +Cc: kexec@lists.infradead.org

Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
---
 kexec/arch/i386/crashdump-x86.c |   93 ++++++++++++++++++---------------------
 1 files changed, 43 insertions(+), 50 deletions(-)

diff --git a/kexec/arch/i386/crashdump-x86.c b/kexec/arch/i386/crashdump-x86.c
index 245402c..63f6b2b 100644
--- a/kexec/arch/i386/crashdump-x86.c
+++ b/kexec/arch/i386/crashdump-x86.c
@@ -518,21 +518,22 @@ static int exclude_region(int *nr_ranges, uint64_t start, uint64_t end)
 
 /* Adds a segment from list of memory regions which new kernel can use to
  * boot. Segment start and end should be aligned to 1K boundary. */
-static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
-								size_t size)
+static int add_memmap(struct memory_range *memmap_p,
+		      unsigned long long addr,
+		      size_t size)
 {
 	int i, j, nr_entries = 0, tidx = 0, align = 1024;
 	unsigned long long mstart, mend;
 
 	/* Do alignment check. */
-	if ((addr%align) || (size%align))
+	if ((addr % align) || (size % align))
 		return -1;
 
 	/* Make sure at least one entry in list is free. */
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < CRASH_MAX_MEMMAP_NR; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (!mstart  && !mend)
+		if (!mstart && !mend)
 			break;
 		else
 			nr_entries++;
@@ -540,31 +541,29 @@ static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
 	if (nr_entries == CRASH_MAX_MEMMAP_NR)
 		return -1;
 
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			break;
-		if (mstart <= (addr+size-1) && mend >=addr)
+
+		if (mstart <= (addr + size - 1) && mend >= addr)
 			/* Overlapping region. */
 			return -1;
 		else if (addr > mend)
-			tidx = i+1;
+			tidx = i + 1;
 	}
-		/* Insert the memory region. */
-		for (j = nr_entries-1; j >= tidx; j--)
-			memmap_p[j+1] = memmap_p[j];
-		memmap_p[tidx].start = addr;
-		memmap_p[tidx].end = addr + size - 1;
+
+	/* Insert the memory region. */
+	for (j = nr_entries - 1; j >= tidx; j--)
+		memmap_p[j+1] = memmap_p[j];
+	memmap_p[tidx].start = addr;
+	memmap_p[tidx].end = addr + size - 1;
+	nr_entries++;
 
 	dbgprintf("Memmap after adding segment\n");
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			break;
-		dbgprintf("%016llx - %016llx\n",
-			mstart, mend);
+		dbgprintf("%016llx - %016llx\n", mstart, mend);
 	}
 
 	return 0;
@@ -572,19 +571,20 @@ static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
 
 /* Removes a segment from list of memory regions which new kernel can use to
  * boot. Segment start and end should be aligned to 1K boundary. */
-static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
-								size_t size)
+static int delete_memmap(struct memory_range *memmap_p,
+			 unsigned long long addr,
+			 size_t size)
 {
 	int i, j, nr_entries = 0, tidx = -1, operation = 0, align = 1024;
 	unsigned long long mstart, mend;
 	struct memory_range temp_region;
 
 	/* Do alignment check. */
-	if ((addr%align) || (size%align))
+	if ((addr % align) || (size % align))
 		return -1;
 
 	/* Make sure at least one entry in list is free. */
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < CRASH_MAX_MEMMAP_NR; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
 		if (!mstart  && !mend)
@@ -596,20 +596,16 @@ static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
 		/* List if full */
 		return -1;
 
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			/* Did not find the segment in the list. */
-			return -1;
+
 		if (mstart <= addr && mend >= (addr + size - 1)) {
 			if (mstart == addr && mend == (addr + size - 1)) {
 				/* Exact match. Delete region */
 				operation = -1;
 				tidx = i;
-				break;
-			}
-			if (mstart != addr && mend != (addr + size - 1)) {
+			} else if (mstart != addr && mend != (addr + size - 1)) {
 				/* Split in two */
 				memmap_p[i].end = addr - 1;
 				temp_region.start = addr + size;
@@ -617,41 +613,38 @@ static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
 				temp_region.type = memmap_p[i].type;
 				operation = 1;
 				tidx = i;
-				break;
-			}
-
-			/* No addition/deletion required. Adjust the existing.*/
-			if (mstart != addr) {
-				memmap_p[i].end = addr - 1;
-				break;
 			} else {
-				memmap_p[i].start = addr + size;
-				break;
+				/* No addition/deletion required. Adjust the existing.*/
+				if (mstart != addr)
+					memmap_p[i].end = addr - 1;
+				else
+					memmap_p[i].start = addr + size;
 			}
+			break;
 		}
 	}
-	if ((operation == 1) && tidx >=0) {
+
+	if (operation == 1 && tidx >= 0) {
 		/* Insert the split memory region. */
-		for (j = nr_entries-1; j > tidx; j--)
+		for (j = nr_entries - 1; j > tidx; j--)
 			memmap_p[j+1] = memmap_p[j];
 		memmap_p[tidx+1] = temp_region;
+		nr_entries++;
 	}
-	if ((operation == -1) && tidx >=0) {
+
+	if (operation == -1 && tidx >= 0) {
 		/* Delete the exact match memory region. */
-		for (j = i+1; j < CRASH_MAX_MEMMAP_NR; j++)
+		for (j = i + 1; j < nr_entries; j++)
 			memmap_p[j-1] = memmap_p[j];
 		memmap_p[j-1].start = memmap_p[j-1].end = 0;
+		nr_entries--;
 	}
 
 	dbgprintf("Memmap after deleting segment\n");
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0) {
-			break;
-		}
-		dbgprintf("%016llx - %016llx\n",
-			mstart, mend);
+		dbgprintf("%016llx - %016llx\n", mstart, mend);
 	}
 
 	return 0;
-- 
1.7.1

_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
@ 2013-02-25 12:38 Zhang Yanfei
  2013-02-26  0:57 ` HATAYAMA Daisuke
  2013-02-26  0:58 ` HATAYAMA Daisuke
  0 siblings, 2 replies; 5+ messages in thread
From: Zhang Yanfei @ 2013-02-25 12:38 UTC (permalink / raw)
  To: Simon Horman; +Cc: kexec@lists.infradead.org

The code in the two functions seems a little messy, so I have modified
some of it, trying to make the logic clearer.

For example,
code before:

         for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
                 mstart = memmap_p[i].start;
                 mend = memmap_p[i].end;
                 if (mstart == 0 && mend == 0)
                         break;

Since we already have nr_entries for this memmap_p array, we needn't
check on every loop iteration whether we have gone through the whole array.
code after:

         for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;

Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
---
 kexec/arch/i386/crashdump-x86.c |   93 ++++++++++++++++++---------------------
 1 files changed, 43 insertions(+), 50 deletions(-)

diff --git a/kexec/arch/i386/crashdump-x86.c b/kexec/arch/i386/crashdump-x86.c
index 245402c..63f6b2b 100644
--- a/kexec/arch/i386/crashdump-x86.c
+++ b/kexec/arch/i386/crashdump-x86.c
@@ -518,21 +518,22 @@ static int exclude_region(int *nr_ranges, uint64_t start, uint64_t end)
 
 /* Adds a segment from list of memory regions which new kernel can use to
  * boot. Segment start and end should be aligned to 1K boundary. */
-static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
-								size_t size)
+static int add_memmap(struct memory_range *memmap_p,
+		      unsigned long long addr,
+		      size_t size)
 {
 	int i, j, nr_entries = 0, tidx = 0, align = 1024;
 	unsigned long long mstart, mend;
 
 	/* Do alignment check. */
-	if ((addr%align) || (size%align))
+	if ((addr % align) || (size % align))
 		return -1;
 
 	/* Make sure at least one entry in list is free. */
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < CRASH_MAX_MEMMAP_NR; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (!mstart  && !mend)
+		if (!mstart && !mend)
 			break;
 		else
 			nr_entries++;
@@ -540,31 +541,29 @@ static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
 	if (nr_entries == CRASH_MAX_MEMMAP_NR)
 		return -1;
 
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			break;
-		if (mstart <= (addr+size-1) && mend >=addr)
+
+		if (mstart <= (addr + size - 1) && mend >= addr)
 			/* Overlapping region. */
 			return -1;
 		else if (addr > mend)
-			tidx = i+1;
+			tidx = i + 1;
 	}
-		/* Insert the memory region. */
-		for (j = nr_entries-1; j >= tidx; j--)
-			memmap_p[j+1] = memmap_p[j];
-		memmap_p[tidx].start = addr;
-		memmap_p[tidx].end = addr + size - 1;
+
+	/* Insert the memory region. */
+	for (j = nr_entries - 1; j >= tidx; j--)
+		memmap_p[j+1] = memmap_p[j];
+	memmap_p[tidx].start = addr;
+	memmap_p[tidx].end = addr + size - 1;
+	nr_entries++;
 
 	dbgprintf("Memmap after adding segment\n");
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			break;
-		dbgprintf("%016llx - %016llx\n",
-			mstart, mend);
+		dbgprintf("%016llx - %016llx\n", mstart, mend);
 	}
 
 	return 0;
@@ -572,19 +571,20 @@ static int add_memmap(struct memory_range *memmap_p, unsigned long long addr,
 
 /* Removes a segment from list of memory regions which new kernel can use to
  * boot. Segment start and end should be aligned to 1K boundary. */
-static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
-								size_t size)
+static int delete_memmap(struct memory_range *memmap_p,
+			 unsigned long long addr,
+			 size_t size)
 {
 	int i, j, nr_entries = 0, tidx = -1, operation = 0, align = 1024;
 	unsigned long long mstart, mend;
 	struct memory_range temp_region;
 
 	/* Do alignment check. */
-	if ((addr%align) || (size%align))
+	if ((addr % align) || (size % align))
 		return -1;
 
 	/* Make sure at least one entry in list is free. */
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < CRASH_MAX_MEMMAP_NR; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
 		if (!mstart  && !mend)
@@ -596,20 +596,16 @@ static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
 		/* List if full */
 		return -1;
 
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0)
-			/* Did not find the segment in the list. */
-			return -1;
+
 		if (mstart <= addr && mend >= (addr + size - 1)) {
 			if (mstart == addr && mend == (addr + size - 1)) {
 				/* Exact match. Delete region */
 				operation = -1;
 				tidx = i;
-				break;
-			}
-			if (mstart != addr && mend != (addr + size - 1)) {
+			} else if (mstart != addr && mend != (addr + size - 1)) {
 				/* Split in two */
 				memmap_p[i].end = addr - 1;
 				temp_region.start = addr + size;
@@ -617,41 +613,38 @@ static int delete_memmap(struct memory_range *memmap_p, unsigned long long addr,
 				temp_region.type = memmap_p[i].type;
 				operation = 1;
 				tidx = i;
-				break;
-			}
-
-			/* No addition/deletion required. Adjust the existing.*/
-			if (mstart != addr) {
-				memmap_p[i].end = addr - 1;
-				break;
 			} else {
-				memmap_p[i].start = addr + size;
-				break;
+				/* No addition/deletion required. Adjust the existing.*/
+				if (mstart != addr)
+					memmap_p[i].end = addr - 1;
+				else
+					memmap_p[i].start = addr + size;
 			}
+			break;
 		}
 	}
-	if ((operation == 1) && tidx >=0) {
+
+	if (operation == 1 && tidx >= 0) {
 		/* Insert the split memory region. */
-		for (j = nr_entries-1; j > tidx; j--)
+		for (j = nr_entries - 1; j > tidx; j--)
 			memmap_p[j+1] = memmap_p[j];
 		memmap_p[tidx+1] = temp_region;
+		nr_entries++;
 	}
-	if ((operation == -1) && tidx >=0) {
+
+	if (operation == -1 && tidx >= 0) {
 		/* Delete the exact match memory region. */
-		for (j = i+1; j < CRASH_MAX_MEMMAP_NR; j++)
+		for (j = i + 1; j < nr_entries; j++)
 			memmap_p[j-1] = memmap_p[j];
 		memmap_p[j-1].start = memmap_p[j-1].end = 0;
+		nr_entries--;
 	}
 
 	dbgprintf("Memmap after deleting segment\n");
-	for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
+	for (i = 0; i < nr_entries; i++) {
 		mstart = memmap_p[i].start;
 		mend = memmap_p[i].end;
-		if (mstart == 0 && mend == 0) {
-			break;
-		}
-		dbgprintf("%016llx - %016llx\n",
-			mstart, mend);
+		dbgprintf("%016llx - %016llx\n", mstart, mend);
 	}
 
 	return 0;
-- 
1.7.1

_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
  2013-02-25 12:38 Zhang Yanfei
@ 2013-02-26  0:57 ` HATAYAMA Daisuke
  2013-02-26  0:58 ` HATAYAMA Daisuke
  1 sibling, 0 replies; 5+ messages in thread
From: HATAYAMA Daisuke @ 2013-02-26  0:57 UTC (permalink / raw)
  To: zhangyanfei; +Cc: horms, kexec

From: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Subject: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
Date: Mon, 25 Feb 2013 20:38:41 +0800

> The code in the two functions seems a little messy, So I modify
> some of them, trying to make the logic more clearly.
> 
> For example,
> code before:
> 
>          for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
>                  mstart = memmap_p[i].start;
>                  mend = memmap_p[i].end;
>                  if (mstart == 0 && mend == 0)
>                          break;
> 
> for we already have nr_entries for this memmap_p array, so we needn't
> have a check in every loop to see if we have go through the whole array.
> code after:
> 
>          for (i = 0; i < nr_entries; i++) {
>  		mstart = memmap_p[i].start;
>  		mend = memmap_p[i].end;
> 

But even if you do so, the number of loop iterations is unchanged after
your fix. The loop doesn't always look up the whole of memmap_p; it
looks up entries until it reaches the first element that has 0 in its
members. Also, CRASH_MAX_MEMMAP_NR appears to be 17 on i386. Is it
problematic in performance?

Rather, it seems problematic to me how length of memmap_p is handled
here. It is initialized first here:

        /* Memory regions which panic kernel can safely use to boot into */
        sz = (sizeof(struct memory_range) * (KEXEC_MAX_SEGMENTS + 1));
        memmap_p = xmalloc(sz);
        memset(memmap_p, 0, sz);

and then it is passed to add_memmap,

        add_memmap(memmap_p, info->backup_src_start, info->backup_src_size);

and there the passed memmap_p is assumed to have length of
CRASH_MAX_MEMMAP_NR that is defined as (KEXEC_MAX_SEGMENTS + 1)
according to the condition of the for loop above. (The
(KEXEC_MAX_SEGMENTS + 1) in the allocation should also be
CRASH_MAX_MEMMAP_NR for clarification?)

The ideas I have for cleaning up here, are for example:

- introduce specific for-each statement to iterate memmap_p just like:

  for_each_memmap(memmap_p, start, end) {
    ...
  }

  or

- use struct memory_ranges instead of struct memory_range and pass it
  to add_memmap; the former has size member in addition to ranges
  member, and then in add_memmap:

  for (i = 0; i < ranges.size; i++) {
    mstart = ranges.ranges[i].start;
    mend = ranges.ranges[i].end;
  }

The former hides actual length of memmap_p from users because they
don't need to care about it, while the latter makes length of memmap_p
explicitly handled even in add_memmap.

But sorry, this idea comes from only a minute's look at the surrounding code.

Also, you should have divided the patch into two: the nr_entries part and
the others.

Thanks.
HATAYAMA, Daisuke


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
  2013-02-25 12:38 Zhang Yanfei
  2013-02-26  0:57 ` HATAYAMA Daisuke
@ 2013-02-26  0:58 ` HATAYAMA Daisuke
  2013-03-05  2:25   ` Simon Horman
  1 sibling, 1 reply; 5+ messages in thread
From: HATAYAMA Daisuke @ 2013-02-26  0:58 UTC (permalink / raw)
  To: zhangyanfei; +Cc: horms, kexec

From: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Subject: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
Date: Mon, 25 Feb 2013 20:38:41 +0800

> The code in the two functions seems a little messy, So I modify
> some of them, trying to make the logic more clearly.
> 
> For example,
> code before:
> 
>          for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
>                  mstart = memmap_p[i].start;
>                  mend = memmap_p[i].end;
>                  if (mstart == 0 && mend == 0)
>                          break;
> 
> for we already have nr_entries for this memmap_p array, so we needn't
> have a check in every loop to see if we have go through the whole array.
> code after:
> 
>          for (i = 0; i < nr_entries; i++) {
>  		mstart = memmap_p[i].start;
>  		mend = memmap_p[i].end;
> 

But even if you do so, the number of loop iterations is unchanged after
your fix. The loop doesn't always look up the whole of memmap_p; it
looks up entries until it reaches the first element with 0 in its members.
Also, CRASH_MAX_MEMMAP_NR is 17 on i386. Is it problematic in
performance?

Rather, it seems problematic to me how length of memmap_p is handled
here. It is initialized first here:

        /* Memory regions which panic kernel can safely use to boot into */
        sz = (sizeof(struct memory_range) * (KEXEC_MAX_SEGMENTS + 1));
        memmap_p = xmalloc(sz);
        memset(memmap_p, 0, sz);

and then it is passed to add_memmap,

        add_memmap(memmap_p, info->backup_src_start, info->backup_src_size);

and there the passed memmap_p is assumed to have length of
CRASH_MAX_MEMMAP_NR that is defined as (KEXEC_MAX_SEGMENTS + 1)
according to the condition of the for loop above. (The
(KEXEC_MAX_SEGMENTS + 1) in the allocation should also be
CRASH_MAX_MEMMAP_NR for clarification?)

The ideas I have for cleaning up here, are for example:

- introduce specific for-each statement to iterate memmap_p just like:

  for_each_memmap(memmap_p, start, end) {
    ...
  }

  or

- use struct memory_ranges instead of struct memory_range and pass it
  to add_memmap; the former has size member in addition to ranges
  member, and then in add_memmap:

  for (i = 0; i < ranges.size; i++) {
    mstart = ranges.ranges[i].start;
    mend = ranges.ranges[i].end;
  }

The former hides actual length of memmap_p from users because they
don't need to care about it, while the latter makes length of memmap_p
explicitly handled even in add_memmap.

But sorry, this idea comes from only a minute's look at the surrounding code.

Also, you should have divided the patch into two: the nr_entries part and
the others.

Thanks.
HATAYAMA, Daisuke


_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
  2013-02-26  0:58 ` HATAYAMA Daisuke
@ 2013-03-05  2:25   ` Simon Horman
  0 siblings, 0 replies; 5+ messages in thread
From: Simon Horman @ 2013-03-05  2:25 UTC (permalink / raw)
  To: HATAYAMA Daisuke; +Cc: kexec, zhangyanfei

On Tue, Feb 26, 2013 at 09:58:14AM +0900, HATAYAMA Daisuke wrote:
> From: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
> Subject: [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap
> Date: Mon, 25 Feb 2013 20:38:41 +0800
> 
> > The code in the two functions seems a little messy, So I modify
> > some of them, trying to make the logic more clearly.
> > 
> > For example,
> > code before:
> > 
> >          for (i = 0; i < CRASH_MAX_MEMMAP_NR;  i++) {
> >                  mstart = memmap_p[i].start;
> >                  mend = memmap_p[i].end;
> >                  if (mstart == 0 && mend == 0)
> >                          break;
> > 
> > for we already have nr_entries for this memmap_p array, so we needn't
> > have a check in every loop to see if we have go through the whole array.
> > code after:
> > 
> >          for (i = 0; i < nr_entries; i++) {
> >  		mstart = memmap_p[i].start;
> >  		mend = memmap_p[i].end;
> > 
> 
> Then, but even if doing so, times of the loop is unchanged after your
> fix. The loop doesn't always look up a whole part of memmap_p; it
> looks up until it reaches the end of elements with 0 in their members.
> Also, CRASH_MAX_MEMMAP_NR is 17 on i386. Is it problematic in
> performance?
> 
> Rather, it seems problematic to me how length of memmap_p is handled
> here. It is initialized first here:
> 
>         /* Memory regions which panic kernel can safely use to boot into */
>         sz = (sizeof(struct memory_range) * (KEXEC_MAX_SEGMENTS + 1));
>         memmap_p = xmalloc(sz);
>         memset(memmaSp_p, 0, sz);
> 
> and then it is passed to add_memmap,
> 
>         add_memmap(memmap_p, info->backup_src_start, info->backup_src_size);
> 
> and there the passed memmap_p is assumed to have length of
> CRASH_MAX_MEMMAP_NR that is defined as (KEXEC_MAX_SEGMENTS + 1)
> according to the condition of the for loop above. (The
> (KEXEC_MAX_SEGMENTS + 1) in the allocation should also be
> CRASH_MAX_MEMMAP_NR for clarification?)
> 
> The ideas I have for cleaning up here, are for example:
> 
> - introduce specific for-each statement to iterate memmap_p just like:
> 
>   for_each_memmap(memmap_p, start, end) {
>     ...
>   }
> 
>   or
> 
> - use struct memory_ranges instead of struct memory_range and pass it
>   to add_memmap; the former has size member in addition to ranges
>   member, and then in add_memmap:
> 
>   for (i = 0; i < ranges.size; i++) {
>     mstart = ranges.ranges[i].start;
>     mend = ranges.ranges[i].end;
>   }
> 
> The former hides actual length of memmap_p from users because they
> don't need to care about it, while the latter makes length of memmap_p
> explicitly handled even in add_memmp.
> 
> But sorry, this idea comes from some minuts look on the code around.
> 
> Also, you should have divided the patch into two: nr_entires part and
> the others.

For the record, I am not taking this change unless some
consensus is reached.

_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2013-03-05 23:48 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-12-25  8:31 [PATCH] kexec,x86: code optimization and adjustment for add_memmap and delete_memmap Zhang Yanfei
  -- strict thread matches above, loose matches on Subject: below --
2013-02-25 12:38 Zhang Yanfei
2013-02-26  0:57 ` HATAYAMA Daisuke
2013-02-26  0:58 ` HATAYAMA Daisuke
2013-03-05  2:25   ` Simon Horman

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox