From: Marcelo Tosatti <mtosatti@redhat.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: Luiz Capitulino <lcapitulino@redhat.com>,
Rik van Riel <riel@redhat.com>,
Linux RT Users <linux-rt-users@vger.kernel.org>,
Marcelo Tosatti <mtosatti@redhat.com>
Subject: [patch 3/3] MM: allow per-cpu vmstat_worker configuration
Date: Wed, 03 May 2017 15:40:10 -0300
Message-ID: <20170503184039.901336380@redhat.com>
In-Reply-To: <20170503184007.174707977@redhat.com>
Following the reasoning of the previous patch in the series, this patch
makes the per-CPU vmstat worker configurable: it allows the user to
disable the vmstat worker for a given CPU. The stat refresh path and
the vmstat shepherd then skip CPUs whose worker has been disabled.
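
For example, to disable the vmstat worker on CPU 3 and read the
setting back (the path is illustrative; it assumes the attribute
group added in patch 2/3 is registered under the per-CPU device
directory):

	# echo 0 > /sys/devices/system/cpu/cpu3/vmstat_worker
	# cat /sys/devices/system/cpu/cpu3/vmstat_worker
	0

Values other than 0 and 1, and writes targeting offline CPUs, are
rejected with -EINVAL.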
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
--- linux/mm/vmstat.c.sothresh 2017-05-03 11:01:17.465914562 -0300
+++ linux/mm/vmstat.c 2017-05-03 11:01:39.746961917 -0300
@@ -92,6 +92,7 @@
EXPORT_SYMBOL(vm_node_stat);
struct vmstat_uparam {
+ atomic_t vmstat_work_enabled;
atomic_t user_stat_thresh;
};
@@ -1606,6 +1607,9 @@
long val;
int err;
int i;
+ int cpu;
+ struct work_struct __percpu *works;
+ static struct cpumask has_work;
/*
* The regular update, every sysctl_stat_interval, may come later
@@ -1619,9 +1623,35 @@
* transiently negative values, report an error here if any of
* the stats is negative, so we know to go looking for imbalance.
*/
- err = schedule_on_each_cpu(refresh_vm_stats);
- if (err)
- return err;
+
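+ /*
+  * Open-code schedule_on_each_cpu(), skipping CPUs whose
+  * vmstat worker has been disabled by the user.
+  */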
+ works = alloc_percpu(struct work_struct);
+ if (!works)
+ return -ENOMEM;
+
+ cpumask_clear(&has_work);
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+ struct vmstat_uparam *vup = &per_cpu(vmstat_uparam, cpu);
+
+ if (atomic_read(&vup->vmstat_work_enabled)) {
+ INIT_WORK(work, refresh_vm_stats);
+ schedule_work_on(cpu, work);
+ cpumask_set_cpu(cpu, &has_work);
+ }
+ }
+
+ for_each_cpu(cpu, &has_work)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ put_online_cpus();
+ free_percpu(works);
+
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
val = atomic_long_read(&vm_zone_stat[i]);
if (val < 0) {
@@ -1713,6 +1743,10 @@
/* Check processors whose vmstat worker threads have been disabled */
for_each_online_cpu(cpu) {
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
+ struct vmstat_uparam *vup = &per_cpu(vmstat_uparam, cpu);
+
+ if (atomic_read(&vup->vmstat_work_enabled) == 0)
+ continue;
if (!delayed_work_pending(dw) && need_update(cpu))
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
@@ -1737,6 +1771,45 @@
#ifdef CONFIG_SYSFS
+static ssize_t vmstat_worker_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int cpu = dev->id;
+ struct vmstat_uparam *vup = &per_cpu(vmstat_uparam, cpu);
+
+ return sprintf(buf, "%d\n", atomic_read(&vup->vmstat_work_enabled));
+}
+
+static ssize_t vmstat_worker_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, val;
+ struct vmstat_uparam *vup;
+ unsigned int cpu = dev->id;
+
+ ret = sscanf(buf, "%d", &val);
+ if (ret != 1 || val > 1 || val < 0)
+ return -EINVAL;
+
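+ /*
+  * Disable preemption so the target CPU cannot go offline
+  * between the cpu_online() check and the update.
+  */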
+ preempt_disable();
+
+ if (cpu_online(cpu)) {
+ vup = &per_cpu(vmstat_uparam, cpu);
+ atomic_set(&vup->vmstat_work_enabled, val);
+ } else {
+ count = -EINVAL;
+ }
+
+ preempt_enable();
+
+ return count;
+}
+
static ssize_t vmstat_thresh_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1779,10 +1852,14 @@
return count;
}
+struct device_attribute vmstat_worker_attr =
+ __ATTR(vmstat_worker, 0644, vmstat_worker_show, vmstat_worker_store);
+
struct device_attribute vmstat_threshold_attr =
__ATTR(vmstat_threshold, 0644, vmstat_thresh_show, vmstat_thresh_store);
static struct attribute *vmstat_attrs[] = {
+ &vmstat_worker_attr.attr,
&vmstat_threshold_attr.attr,
NULL
};
@@ -1820,6 +1897,7 @@
struct vmstat_uparam *vup = &per_cpu(vmstat_uparam, cpu);
atomic_set(&vup->user_stat_thresh, 0);
+ atomic_set(&vup->vmstat_work_enabled, 1);
}
}
@@ -1857,6 +1935,7 @@
node = cpu_to_node(cpu);
atomic_set(&vup->user_stat_thresh, 0);
+ atomic_set(&vup->vmstat_work_enabled, 1);
refresh_zone_stat_thresholds();
node_cpus = cpumask_of_node(node);
--