linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
To: Minchan Kim <minchan@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH 3/3] zram: adjust the number of zram thread
Date: Fri, 21 Oct 2016 15:23:27 +0900	[thread overview]
Message-ID: <20161021062327.GC527@swordfish> (raw)
In-Reply-To: <1474526565-6676-3-git-send-email-minchan@kernel.org>

On (09/22/16 15:42), Minchan Kim wrote:
[..]
> +static int __zram_cpu_notifier(void *dummy, unsigned long action,
> +				unsigned long cpu)
>  {
>  	struct zram_worker *worker;
>  
> -	while (!list_empty(&workers.worker_list)) {
> +	switch (action) {
> +	case CPU_UP_PREPARE:
> +		worker = kmalloc(sizeof(*worker), GFP_KERNEL);
> +		if (!worker) {
> +			pr_err("Can't allocate a worker\n");
> +			return NOTIFY_BAD;
> +		}
> +
> +		worker->task = kthread_run(zram_thread, NULL, "zramd-%lu", cpu);
> +		if (IS_ERR(worker->task)) {
> +			kfree(worker);
> +			pr_err("Can't allocate a zram thread\n");
> +			return NOTIFY_BAD;
> +		}

well, strictly speaking we have no strict bound-to-cpu (per-cpu)
requirement here, we just want to have num_online_cpus() worker threads.
if we fail to create one more worker thread nothing really bad happens,
so I think we better not block that cpu from coming online.
iow, always 'return NOTIFY_OK'.

	-ss

> +		spin_lock(&workers.req_lock);
> +		list_add(&worker->list, &workers.worker_list);
> +		spin_unlock(&workers.req_lock);
> +		break;
> +	case CPU_DEAD:
> +	case CPU_UP_CANCELED:
> +		spin_lock(&workers.req_lock);
> +		WARN_ON(list_empty(&workers.worker_list));
> +
>  		worker = list_first_entry(&workers.worker_list,
> -				struct zram_worker,
> -				list);
> -		kthread_stop(worker->task);
> +					struct zram_worker, list);
>  		list_del(&worker->list);
> +		spin_unlock(&workers.req_lock);
> +
> +		kthread_stop(worker->task);
>  		kfree(worker);
> +		break;
> +	default:
> +		break;
>  	}
> +	return NOTIFY_OK;
> +}
> +
> +static int zram_cpu_notifier(struct notifier_block *nb,
> +		unsigned long action, void *pcpu)
> +{
> +	unsigned long cpu = (unsigned long)pcpu;
> +
> +	return __zram_cpu_notifier(NULL, action, cpu);
> +}
> +
> +static void destroy_workers(void)
> +{
> +	unsigned long cpu;
> +
> +	cpu_notifier_register_begin();
> +	for_each_online_cpu(cpu)
> +		__zram_cpu_notifier(NULL, CPU_UP_CANCELED, cpu);
> +	__unregister_cpu_notifier(&workers.notifier);
> +	cpu_notifier_register_done();
>  
>  	WARN_ON(workers.nr_running);
>  }
>  
>  static int create_workers(void)
>  {
> -	int i;
> -	int nr_cpu = num_online_cpus();
> -	struct zram_worker *worker;
> +	int cpu;
>  
>  	INIT_LIST_HEAD(&workers.worker_list);
>  	INIT_LIST_HEAD(&workers.req_list);
>  	spin_lock_init(&workers.req_lock);
>  	init_waitqueue_head(&workers.req_wait);
>  
> -	for (i = 0; i < nr_cpu; i++) {
> -		worker = kmalloc(sizeof(*worker), GFP_KERNEL);
> -		if (!worker)
> -			goto error;
> -
> -		worker->task = kthread_run(zram_thread, NULL, "zramd-%d", i);
> -		if (IS_ERR(worker->task)) {
> -			kfree(worker);
> -			goto error;
> -		}
> -
> -		list_add(&worker->list, &workers.worker_list);
> +	workers.notifier.notifier_call = zram_cpu_notifier;
> +	cpu_notifier_register_begin();
> +	for_each_online_cpu(cpu) {
> +		if (__zram_cpu_notifier(NULL, CPU_UP_PREPARE, cpu) ==
> +				NOTIFY_BAD)
> +			goto cleanup;
>  	}
>  
> +	__register_cpu_notifier(&workers.notifier);
> +	cpu_notifier_register_done();
> +
>  	return 0;
> +cleanup:
> +	for_each_online_cpu(cpu)
> +		__zram_cpu_notifier(NULL, CPU_UP_CANCELED, cpu);
> +	cpu_notifier_register_done();
>  
> -error:
> -	destroy_workers();
> -	return 1;
> +	return -ENOMEM;
>  }
>  
>  static int zram_rw_async_page(struct zram *zram,
> -- 
> 2.7.4
> 

  reply	other threads:[~2016-10-21  6:23 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-09-22  6:42 [PATCH 1/3] zram: rename IO processing functions Minchan Kim
2016-09-22  6:42 ` [PATCH 2/3] zram: support page-based parallel write Minchan Kim
2016-09-29  3:18   ` Sergey Senozhatsky
2016-09-30  5:52     ` Minchan Kim
2016-10-04  4:43       ` Sergey Senozhatsky
2016-10-04  7:35         ` Minchan Kim
2016-10-05  2:01         ` Minchan Kim
2016-10-06  8:29           ` Sergey Senozhatsky
2016-10-07  6:33             ` Minchan Kim
2016-10-07 18:08               ` Sergey Senozhatsky
2016-10-17  5:04               ` Minchan Kim
2016-10-21  6:08                 ` Sergey Senozhatsky
2016-10-24  4:51                   ` Minchan Kim
2016-10-21  6:03   ` Sergey Senozhatsky
2016-10-24  4:47     ` Minchan Kim
2016-10-24  5:20       ` Sergey Senozhatsky
2016-10-24  5:58         ` Minchan Kim
2016-10-24  7:23           ` Sergey Senozhatsky
2016-09-22  6:42 ` [PATCH 3/3] zram: adjust the number of zram thread Minchan Kim
2016-10-21  6:23   ` Sergey Senozhatsky [this message]
2016-10-24  4:54     ` Minchan Kim
2016-10-24  5:29       ` Sergey Senozhatsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20161021062327.GC527@swordfish \
    --to=sergey.senozhatsky.work@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=minchan@kernel.org \
    --cc=sergey.senozhatsky@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).