Kexec Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "\"Zhou, Wenjian/周文剑\"" <zhouwj-fnst@cn.fujitsu.com>
To: Atsushi Kumagai <ats-kumagai@wm.jp.nec.com>
Cc: "kexec@lists.infradead.org" <kexec@lists.infradead.org>
Subject: Re: [PATCH V5] Improve the performance of --num-threads -d 31
Date: Thu, 14 Apr 2016 09:10:55 +0800	[thread overview]
Message-ID: <570EEE1F.80401@cn.fujitsu.com> (raw)
In-Reply-To: <0910DD04CBD6DE4193FCF86B9C00BE9701E3A0AF@BPXM01GP.gisp.nec.co.jp>

On 04/13/2016 04:07 PM, Atsushi Kumagai wrote:
>>>>> +	}
>>>>> +
>>>>> +	limit_size = limit_size - MAP_REGION * info->num_threads;
>>>>> +
>>>
>>> This patch prioritizes the memory for multi thread since it is reserved first,
>>> but I think enough cyclic buffer should be reserved first because it's for more
>>> fundamental feature than multi-threading.
>>>
>>
>> I'm not sure what is the proper value of cyclic buffer size.
>> Should we leave 4MB for it?
>> Or calculate according to the bitmap_size?
>
> In commit:0b7328280, we decided 4MB is enough, please leave it.
>
> BTW, mmap() in 2nd kernel doesn't consume MAP_REGION(4MB) of physical memory
> since the target region is mapped directly to the old memory(/proc/vmcore).
> If 4MB region is mapped, 8KB of page table for the region will be created,
> *this* is the memory footprint of mmap().
>
> OTOH, mmap() in 1st kernel will consume physical memory to copy file data
> from disk to memory by page fault. However, 1st kernel environment must have
> enough free memory, we don't need to worry too much about it.
>
> So now I don't think MAP_REGION should be considered as memory footprint.
> How about you ?
>

Ah, I didn't know that.
And I was puzzled about why the memory each thread costs is far less than 4MB.
Thanks a lot for telling me that!

-- 
Thanks
Zhou

>
> Thanks,
> Atsushi Kumagai
>
>> --
>> Thanks
>> Zhou
>>>
>>> Thanks,
>>> Atsushi Kumagai
>>>
>>>>>     	/* Try to keep both 1st and 2nd bitmap at the same time. */
>>>>>     	bitmap_size = info->max_mapnr * 2 / BITPERBYTE;
>>>>>
>>>>> diff --git a/makedumpfile.h b/makedumpfile.h
>>>>> index e0b5bbf..4b315c0 100644
>>>>> --- a/makedumpfile.h
>>>>> +++ b/makedumpfile.h
>>>>> @@ -44,6 +44,7 @@
>>>>>     #include "print_info.h"
>>>>>     #include "sadump_mod.h"
>>>>>     #include <pthread.h>
>>>>> +#include <semaphore.h>
>>>>>
>>>>>     /*
>>>>>      * Result of command
>>>>> @@ -977,7 +978,7 @@ typedef unsigned long long int ulonglong;
>>>>>     #define PAGE_DATA_NUM	(50)
>>>>>     #define WAIT_TIME	(60 * 10)
>>>>>     #define PTHREAD_FAIL	((void *)-2)
>>>>> -#define NUM_BUFFERS	(50)
>>>>> +#define NUM_BUFFERS	(20)
>>>>>
>>>>>     struct mmap_cache {
>>>>>     	char	*mmap_buf;
>>>>> @@ -985,28 +986,33 @@ struct mmap_cache {
>>>>>     	off_t   mmap_end_offset;
>>>>>     };
>>>>>
>>>>> +enum {
>>>>> +	FLAG_UNUSED,
>>>>> +	FLAG_READY,
>>>>> +	FLAG_FILLING
>>>>> +};
>>>>> +struct page_flag {
>>>>> +	mdf_pfn_t pfn;
>>>>> +	char zero;
>>>>> +	char ready;
>>>>> +	short index;
>>>>> +	struct page_flag *next;
>>>>> +};
>>>>> +
>>>>>     struct page_data
>>>>>     {
>>>>> -	mdf_pfn_t pfn;
>>>>> -	int dumpable;
>>>>> -	int zero;
>>>>> -	unsigned int flags;
>>>>>     	long size;
>>>>>     	unsigned char *buf;
>>>>> -	pthread_mutex_t mutex;
>>>>> -	/*
>>>>> -	 * whether the page_data is ready to be consumed
>>>>> -	 */
>>>>> -	int ready;
>>>>> +	int flags;
>>>>> +	int used;
>>>>>     };
>>>>>
>>>>>     struct thread_args {
>>>>>     	int thread_num;
>>>>>     	unsigned long len_buf_out;
>>>>> -	mdf_pfn_t start_pfn, end_pfn;
>>>>> -	int page_data_num;
>>>>>     	struct cycle *cycle;
>>>>>     	struct page_data *page_data_buf;
>>>>> +	struct page_flag *page_flag_buf;
>>>>>     };
>>>>>
>>>>>     /*
>>>>> @@ -1295,11 +1301,12 @@ struct DumpInfo {
>>>>>     	pthread_t **threads;
>>>>>     	struct thread_args *kdump_thread_args;
>>>>>     	struct page_data *page_data_buf;
>>>>> +	struct page_flag **page_flag_buf;
>>>>> +	sem_t page_flag_buf_sem;
>>>>>     	pthread_rwlock_t usemmap_rwlock;
>>>>>     	mdf_pfn_t current_pfn;
>>>>>     	pthread_mutex_t current_pfn_mutex;
>>>>> -	mdf_pfn_t consumed_pfn;
>>>>> -	pthread_mutex_t consumed_pfn_mutex;
>>>>> +	pthread_mutex_t page_data_mutex;
>>>>>     	pthread_mutex_t filter_mutex;
>>>>>     };
>>>>>     extern struct DumpInfo		*info;
>>>>>
>>>>
>>>>
>>>>
>>>> _______________________________________________
>>>> kexec mailing list
>>>> kexec@lists.infradead.org
>>>> http://lists.infradead.org/mailman/listinfo/kexec
>>
>>




_______________________________________________
kexec mailing list
kexec@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/kexec

  reply	other threads:[~2016-04-14  1:13 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-04-08  8:20 [PATCH V5] Improve the performance of --num-threads -d 31 Zhou Wenjian
2016-04-12  3:17 ` "Zhou, Wenjian/周文剑"
2016-04-12  8:25   ` Atsushi Kumagai
2016-04-13  0:38     ` "Zhou, Wenjian/周文剑"
2016-04-13  8:07       ` Atsushi Kumagai
2016-04-14  1:10         ` "Zhou, Wenjian/周文剑" [this message]
2016-04-13  4:53   ` Minfei Huang
2016-04-13  4:51     ` "Zhou, Wenjian/周文剑"

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=570EEE1F.80401@cn.fujitsu.com \
    --to=zhouwj-fnst@cn.fujitsu.com \
    --cc=ats-kumagai@wm.jp.nec.com \
    --cc=kexec@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox