* [RFC] [PATCH] memcg: cleanup memory thresholds
@ 2010-05-09 0:11 Kirill A. Shutemov
2010-05-17 13:24 ` Kirill A. Shutemov
2010-05-17 20:17 ` Paul Menage
0 siblings, 2 replies; 3+ messages in thread
From: Kirill A. Shutemov @ 2010-05-09 0:11 UTC (permalink / raw)
To: linux-mm, containers
Cc: Kirill A. Shutemov, Andrew Morton, Phil Carmody, Balbir Singh,
Daisuke Nishimura, KAMEZAWA Hiroyuki, Paul Menage, Li Zefan,
linux-kernel
Introduce struct mem_cgroup_thresholds. It helps to reduce the number of
checks for the threshold type (memory or mem+swap).
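To illustrate, a minimal model of the resulting pattern (a reduced sketch,
not the kernel code itself; all names match the patch below):

	struct mem_cgroup_thresholds {
		struct mem_cgroup_threshold_ary *primary; /* RCU-protected */
		struct mem_cgroup_threshold_ary *spare;   /* preallocated */
	};

	/* Each event handler selects the pair once... */
	struct mem_cgroup_thresholds *thresholds;

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * ...and every later access goes through thresholds->primary or
	 * thresholds->spare, with no further branching on the type.
	 */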
Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
---
mm/memcontrol.c | 151 ++++++++++++++++++++++++-------------------------------
1 files changed, 66 insertions(+), 85 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a6d2a4c..a6c6268 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -158,6 +158,18 @@ struct mem_cgroup_threshold_ary {
/* Array of thresholds */
struct mem_cgroup_threshold entries[0];
};
+
+struct mem_cgroup_thresholds {
+ /* Primary thresholds array */
+ struct mem_cgroup_threshold_ary *primary;
+ /*
+ * Spare threshold array.
+ * It is needed to make mem_cgroup_unregister_event() "never fail".
+ * It must be able to store at least primary->size - 1 entries.
+ */
+ struct mem_cgroup_threshold_ary *spare;
+};
+
/* for OOM */
struct mem_cgroup_eventfd_list {
struct list_head list;
@@ -224,20 +236,10 @@ struct mem_cgroup {
struct mutex thresholds_lock;
/* thresholds for memory usage. RCU-protected */
- struct mem_cgroup_threshold_ary *thresholds;
-
- /*
- * Preallocated buffer to be used in mem_cgroup_unregister_event()
- * to make it "never fail".
- * It must be able to store at least thresholds->size - 1 entries.
- */
- struct mem_cgroup_threshold_ary *__thresholds;
+ struct mem_cgroup_thresholds thresholds;
/* thresholds for mem+swap usage. RCU-protected */
- struct mem_cgroup_threshold_ary *memsw_thresholds;
-
- /* the same as __thresholds, but for memsw_thresholds */
- struct mem_cgroup_threshold_ary *__memsw_thresholds;
+ struct mem_cgroup_thresholds memsw_thresholds;
/* For oom notifier event fd */
struct list_head oom_notify;
@@ -3438,9 +3440,9 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
rcu_read_lock();
if (!swap)
- t = rcu_dereference(memcg->thresholds);
+ t = rcu_dereference(memcg->thresholds.primary);
else
- t = rcu_dereference(memcg->memsw_thresholds);
+ t = rcu_dereference(memcg->memsw_thresholds.primary);
if (!t)
goto unlock;
@@ -3514,91 +3516,78 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
+ struct mem_cgroup_thresholds *thresholds;
+ struct mem_cgroup_threshold_ary *new;
int type = MEMFILE_TYPE(cft->private);
u64 threshold, usage;
- int size;
- int i, ret;
+ int i, size, ret;
ret = res_counter_memparse_write_strategy(args, &threshold);
if (ret)
return ret;
mutex_lock(&memcg->thresholds_lock);
+
if (type == _MEM)
- thresholds = memcg->thresholds;
+ thresholds = &memcg->thresholds;
else if (type == _MEMSWAP)
- thresholds = memcg->memsw_thresholds;
+ thresholds = &memcg->memsw_thresholds;
else
BUG();
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before adding a new one */
- if (thresholds)
+ if (thresholds->primary)
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
- if (thresholds)
- size = thresholds->size + 1;
- else
- size = 1;
+ size = thresholds->primary ? thresholds->primary->size + 1 : 1;
/* Allocate memory for new array of thresholds */
- thresholds_new = kmalloc(sizeof(*thresholds_new) +
- size * sizeof(struct mem_cgroup_threshold),
+ new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
GFP_KERNEL);
- if (!thresholds_new) {
+ if (!new) {
ret = -ENOMEM;
goto unlock;
}
- thresholds_new->size = size;
+ new->size = size;
/* Copy thresholds (if any) to new array */
- if (thresholds)
- memcpy(thresholds_new->entries, thresholds->entries,
- thresholds->size *
+ if (thresholds->primary) {
+ memcpy(new->entries, thresholds->primary->entries, (size - 1) *
sizeof(struct mem_cgroup_threshold));
+ }
+
/* Add new threshold */
- thresholds_new->entries[size - 1].eventfd = eventfd;
- thresholds_new->entries[size - 1].threshold = threshold;
+ new->entries[size - 1].eventfd = eventfd;
+ new->entries[size - 1].threshold = threshold;
/* Sort thresholds. Registering of new threshold isn't time-critical */
- sort(thresholds_new->entries, size,
- sizeof(struct mem_cgroup_threshold),
+ sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
compare_thresholds, NULL);
/* Find current threshold */
- thresholds_new->current_threshold = -1;
+ new->current_threshold = -1;
for (i = 0; i < size; i++) {
- if (thresholds_new->entries[i].threshold < usage) {
+ if (new->entries[i].threshold < usage) {
/*
- * thresholds_new->current_threshold will not be used
- * until rcu_assign_pointer(), so it's safe to increment
+ * new->current_threshold will not be used until
+ * rcu_assign_pointer(), so it's safe to increment
* it here.
*/
- ++thresholds_new->current_threshold;
+ ++new->current_threshold;
}
}
- if (type == _MEM)
- rcu_assign_pointer(memcg->thresholds, thresholds_new);
- else
- rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
+ /* Free old spare buffer and save old primary buffer as spare */
+ kfree(thresholds->spare);
+ thresholds->spare = thresholds->primary;
+
+ rcu_assign_pointer(thresholds->primary, new);
/* To be sure that nobody uses thresholds */
synchronize_rcu();
- /*
- * Free old preallocated buffer and use thresholds as new
- * preallocated buffer.
- */
- if (type == _MEM) {
- kfree(memcg->__thresholds);
- memcg->__thresholds = thresholds;
- } else {
- kfree(memcg->__memsw_thresholds);
- memcg->__memsw_thresholds = thresholds;
- }
unlock:
mutex_unlock(&memcg->thresholds_lock);
@@ -3609,17 +3598,17 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
+ struct mem_cgroup_thresholds *thresholds;
+ struct mem_cgroup_threshold_ary *new;
int type = MEMFILE_TYPE(cft->private);
u64 usage;
- int size = 0;
- int i, j;
+ int i, j, size;
mutex_lock(&memcg->thresholds_lock);
if (type == _MEM)
- thresholds = memcg->thresholds;
+ thresholds = &memcg->thresholds;
else if (type == _MEMSWAP)
- thresholds = memcg->memsw_thresholds;
+ thresholds = &memcg->memsw_thresholds;
else
BUG();
@@ -3635,53 +3624,45 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
/* Calculate new number of threshold */
- for (i = 0; i < thresholds->size; i++) {
- if (thresholds->entries[i].eventfd != eventfd)
+ size = 0;
+ for (i = 0; i < thresholds->primary->size; i++) {
+ if (thresholds->primary->entries[i].eventfd != eventfd)
size++;
}
- /* Use preallocated buffer for new array of thresholds */
- if (type == _MEM)
- thresholds_new = memcg->__thresholds;
- else
- thresholds_new = memcg->__memsw_thresholds;
+ new = thresholds->spare;
/* Set thresholds array to NULL if we don't have thresholds */
if (!size) {
- kfree(thresholds_new);
- thresholds_new = NULL;
+ kfree(new);
+ new = NULL;
goto swap_buffers;
}
- thresholds_new->size = size;
+ new->size = size;
/* Copy thresholds and find current threshold */
- thresholds_new->current_threshold = -1;
- for (i = 0, j = 0; i < thresholds->size; i++) {
- if (thresholds->entries[i].eventfd == eventfd)
+ new->current_threshold = -1;
+ for (i = 0, j = 0; i < thresholds->primary->size; i++) {
+ if (thresholds->primary->entries[i].eventfd == eventfd)
continue;
- thresholds_new->entries[j] = thresholds->entries[i];
- if (thresholds_new->entries[j].threshold < usage) {
+ new->entries[j] = thresholds->primary->entries[i];
+ if (new->entries[j].threshold < usage) {
/*
- * thresholds_new->current_threshold will not be used
+ * new->current_threshold will not be used
* until rcu_assign_pointer(), so it's safe to increment
* it here.
*/
- ++thresholds_new->current_threshold;
+ ++new->current_threshold;
}
j++;
}
swap_buffers:
- /* Swap thresholds array and preallocated buffer */
- if (type == _MEM) {
- memcg->__thresholds = thresholds;
- rcu_assign_pointer(memcg->thresholds, thresholds_new);
- } else {
- memcg->__memsw_thresholds = thresholds;
- rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
- }
+ /* Swap primary and spare array */
+ thresholds->spare = thresholds->primary;
+ rcu_assign_pointer(thresholds->primary, new);
/* To be sure that nobody uses thresholds */
synchronize_rcu();
--
1.7.0.4
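For readers tracing the RCU usage, a reduced sketch of the publish/swap
sequence that both register and unregister now share (simplified from the
patch above; locking and error handling omitted):

	/*
	 * Register: publish the newly allocated array, recycling the old
	 * primary as the spare buffer for a later unregister.
	 */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);
	synchronize_rcu();	/* no reader may still use the old array */

	/*
	 * Unregister: reuse the preallocated spare instead of allocating,
	 * so the operation can never fail with -ENOMEM. The surviving
	 * entries are copied from primary into the spare before the swap.
	 */
	new = thresholds->spare;
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);
	synchronize_rcu();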
* Re: [RFC] [PATCH] memcg: cleanup memory thresholds
2010-05-09 0:11 [RFC] [PATCH] memcg: cleanup memory thresholds Kirill A. Shutemov
@ 2010-05-17 13:24 ` Kirill A. Shutemov
2010-05-17 20:17 ` Paul Menage
1 sibling, 0 replies; 3+ messages in thread
From: Kirill A. Shutemov @ 2010-05-17 13:24 UTC (permalink / raw)
To: linux-mm, containers
Cc: Kirill A. Shutemov, Andrew Morton, Phil Carmody, Balbir Singh,
Daisuke Nishimura, KAMEZAWA Hiroyuki, Paul Menage, Li Zefan,
linux-kernel
Any comments?
On Sun, May 9, 2010 at 3:11 AM, Kirill A. Shutemov <kirill@shutemov.name> wrote:
> Introduce struct mem_cgroup_thresholds. It helps to reduce the number of
> checks for the threshold type (memory or mem+swap).
>
> Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
* Re: [RFC] [PATCH] memcg: cleanup memory thresholds
2010-05-09 0:11 [RFC] [PATCH] memcg: cleanup memory thresholds Kirill A. Shutemov
2010-05-17 13:24 ` Kirill A. Shutemov
@ 2010-05-17 20:17 ` Paul Menage
1 sibling, 0 replies; 3+ messages in thread
From: Paul Menage @ 2010-05-17 20:17 UTC (permalink / raw)
To: Kirill A. Shutemov
Cc: linux-mm, containers, Andrew Morton, Phil Carmody, Balbir Singh,
Daisuke Nishimura, KAMEZAWA Hiroyuki, Li Zefan, linux-kernel
On Sat, May 8, 2010 at 5:11 PM, Kirill A. Shutemov <kirill@shutemov.name> wrote:
> Introduce struct mem_cgroup_thresholds. It helps to reduce the number of
> checks for the threshold type (memory or mem+swap).
>
> Signed-off-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Paul Menage <menage@google.com>
Thanks,
Paul