linux-raid.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH -next] md: simplify md_seq_ops
@ 2023-09-11  6:50 Yu Kuai
  2023-09-11 14:05 ` Mariusz Tkaczyk
  0 siblings, 1 reply; 6+ messages in thread
From: Yu Kuai @ 2023-09-11  6:50 UTC (permalink / raw)
  To: mariusz.tkaczyk, song
  Cc: linux-raid, linux-kernel, yukuai3, yukuai1, yi.zhang, yangerkun

From: Yu Kuai <yukuai3@huawei.com>

Use seq_list_start/next/stop() directly. Move printing "Personalities"
to md_seq_start() and "unused devices" to md_seq_stop().

Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/md/md.c | 124 ++++++++++++------------------------------------
 1 file changed, 31 insertions(+), 93 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0fe7ab6e8ab9..9c1155042335 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8192,105 +8192,14 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 	return 1;
 }
 
-static void *md_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	struct list_head *tmp;
-	loff_t l = *pos;
-	struct mddev *mddev;
-
-	if (l == 0x10000) {
-		++*pos;
-		return (void *)2;
-	}
-	if (l > 0x10000)
-		return NULL;
-	if (!l--)
-		/* header */
-		return (void*)1;
-
-	spin_lock(&all_mddevs_lock);
-	list_for_each(tmp,&all_mddevs)
-		if (!l--) {
-			mddev = list_entry(tmp, struct mddev, all_mddevs);
-			if (!mddev_get(mddev))
-				continue;
-			spin_unlock(&all_mddevs_lock);
-			return mddev;
-		}
-	spin_unlock(&all_mddevs_lock);
-	if (!l--)
-		return (void*)2;/* tail */
-	return NULL;
-}
-
-static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	struct list_head *tmp;
-	struct mddev *next_mddev, *mddev = v;
-	struct mddev *to_put = NULL;
-
-	++*pos;
-	if (v == (void*)2)
-		return NULL;
-
-	spin_lock(&all_mddevs_lock);
-	if (v == (void*)1) {
-		tmp = all_mddevs.next;
-	} else {
-		to_put = mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-
-	for (;;) {
-		if (tmp == &all_mddevs) {
-			next_mddev = (void*)2;
-			*pos = 0x10000;
-			break;
-		}
-		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
-		if (mddev_get(next_mddev))
-			break;
-		mddev = next_mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-	spin_unlock(&all_mddevs_lock);
-
-	if (to_put)
-		mddev_put(mddev);
-	return next_mddev;
-
-}
-
-static void md_seq_stop(struct seq_file *seq, void *v)
-{
-	struct mddev *mddev = v;
-
-	if (mddev && v != (void*)1 && v != (void*)2)
-		mddev_put(mddev);
-}
-
 static int md_seq_show(struct seq_file *seq, void *v)
 {
-	struct mddev *mddev = v;
+	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
 	sector_t sectors;
 	struct md_rdev *rdev;
 
-	if (v == (void*)1) {
-		struct md_personality *pers;
-		seq_printf(seq, "Personalities : ");
-		spin_lock(&pers_lock);
-		list_for_each_entry(pers, &pers_list, list)
-			seq_printf(seq, "[%s] ", pers->name);
-
-		spin_unlock(&pers_lock);
-		seq_printf(seq, "\n");
-		seq->poll_event = atomic_read(&md_event_count);
-		return 0;
-	}
-	if (v == (void*)2) {
-		status_unused(seq);
+	if (test_bit(MD_DELETED, &mddev->flags))
 		return 0;
-	}
 
 	spin_lock(&mddev->lock);
 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
@@ -8366,6 +8275,35 @@ static int md_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct md_personality *pers;
+
+	seq_puts(seq, "Personalities : ");
+	spin_lock(&pers_lock);
+	list_for_each_entry(pers, &pers_list, list)
+		seq_printf(seq, "[%s] ", pers->name);
+
+	spin_unlock(&pers_lock);
+	seq_puts(seq, "\n");
+	seq->poll_event = atomic_read(&md_event_count);
+
+	spin_lock(&all_mddevs_lock);
+
+	return seq_list_start(&all_mddevs, *pos);
+}
+
+static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &all_mddevs, pos);
+}
+
+static void md_seq_stop(struct seq_file *seq, void *v)
+{
+	status_unused(seq);
+	spin_unlock(&all_mddevs_lock);
+}
+
 static const struct seq_operations md_seq_ops = {
 	.start  = md_seq_start,
 	.next   = md_seq_next,
-- 
2.39.2


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH -next] md: simplify md_seq_ops
  2023-09-11  6:50 [PATCH -next] md: simplify md_seq_ops Yu Kuai
@ 2023-09-11 14:05 ` Mariusz Tkaczyk
  2023-09-12  1:02   ` Yu Kuai
  0 siblings, 1 reply; 6+ messages in thread
From: Mariusz Tkaczyk @ 2023-09-11 14:05 UTC (permalink / raw)
  To: Yu Kuai; +Cc: song, linux-raid, linux-kernel, yukuai3, yi.zhang, yangerkun

On Mon, 11 Sep 2023 14:50:10 +0800
Yu Kuai <yukuai1@huaweicloud.com> wrote:

> From: Yu Kuai <yukuai3@huawei.com>
> 
> Use seq_list_start/next/stop() directly. Move printing "Personalities"
> to md_seq_start() and "unused devices" to md_seq_stop().
> 
> Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
>  drivers/md/md.c | 124 ++++++++++++------------------------------------
>  1 file changed, 31 insertions(+), 93 deletions(-)
> 
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 0fe7ab6e8ab9..9c1155042335 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -8192,105 +8192,14 @@ static int status_resync(struct seq_file *seq,
> struct mddev *mddev) return 1;
>  }
>  
> -static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> -{
> -	struct list_head *tmp;
> -	loff_t l = *pos;
> -	struct mddev *mddev;
> -
> -	if (l == 0x10000) {
> -		++*pos;
> -		return (void *)2;
> -	}
> -	if (l > 0x10000)
> -		return NULL;
> -	if (!l--)
> -		/* header */
> -		return (void*)1;
> -
> -	spin_lock(&all_mddevs_lock);
> -	list_for_each(tmp,&all_mddevs)
> -		if (!l--) {
> -			mddev = list_entry(tmp, struct mddev, all_mddevs);
> -			if (!mddev_get(mddev))
> -				continue;
> -			spin_unlock(&all_mddevs_lock);
> -			return mddev;
> -		}
> -	spin_unlock(&all_mddevs_lock);
> -	if (!l--)
> -		return (void*)2;/* tail */
> -	return NULL;
> -}
> -
> -static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> -{
> -	struct list_head *tmp;
> -	struct mddev *next_mddev, *mddev = v;
> -	struct mddev *to_put = NULL;
> -
> -	++*pos;
> -	if (v == (void*)2)
> -		return NULL;
> -
> -	spin_lock(&all_mddevs_lock);
> -	if (v == (void*)1) {
> -		tmp = all_mddevs.next;
> -	} else {
> -		to_put = mddev;
> -		tmp = mddev->all_mddevs.next;
> -	}
> -
> -	for (;;) {
> -		if (tmp == &all_mddevs) {
> -			next_mddev = (void*)2;
> -			*pos = 0x10000;
> -			break;
> -		}
> -		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
> -		if (mddev_get(next_mddev))
> -			break;
> -		mddev = next_mddev;
> -		tmp = mddev->all_mddevs.next;
> -	}
> -	spin_unlock(&all_mddevs_lock);
> -
> -	if (to_put)
> -		mddev_put(mddev);
> -	return next_mddev;
> -
> -}
> -
> -static void md_seq_stop(struct seq_file *seq, void *v)
> -{
> -	struct mddev *mddev = v;
> -
> -	if (mddev && v != (void*)1 && v != (void*)2)
> -		mddev_put(mddev);
> -}
> -
>  static int md_seq_show(struct seq_file *seq, void *v)
>  {
> -	struct mddev *mddev = v;
> +	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
>  	sector_t sectors;
>  	struct md_rdev *rdev;
>  
> -	if (v == (void*)1) {
> -		struct md_personality *pers;
> -		seq_printf(seq, "Personalities : ");
> -		spin_lock(&pers_lock);
> -		list_for_each_entry(pers, &pers_list, list)
> -			seq_printf(seq, "[%s] ", pers->name);
> -
> -		spin_unlock(&pers_lock);
> -		seq_printf(seq, "\n");
> -		seq->poll_event = atomic_read(&md_event_count);
> -		return 0;
> -	}
> -	if (v == (void*)2) {
> -		status_unused(seq);
> +	if (test_bit(MD_DELETED, &mddev->flags))
>  		return 0;
> -	}
>  
>  	spin_lock(&mddev->lock);
>  	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
> @@ -8366,6 +8275,35 @@ static int md_seq_show(struct seq_file *seq, void *v)
>  	return 0;
>  }
>  
> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> +	struct md_personality *pers;
> +
> +	seq_puts(seq, "Personalities : ");
> +	spin_lock(&pers_lock);
> +	list_for_each_entry(pers, &pers_list, list)
> +		seq_printf(seq, "[%s] ", pers->name);
> +
> +	spin_unlock(&pers_lock);
> +	seq_puts(seq, "\n");
> +	seq->poll_event = atomic_read(&md_event_count);
> +
> +	spin_lock(&all_mddevs_lock);

I would prefer to increase "active" instead of holding the lock when enumerating
over the devices. The main reason is that parsing mdstat is implemented in mdadm,
so it could become a kind of blocking action - for example, mdmon follows mdstat,
so it is read frequently. The time to get other actions done can increase
significantly because every open or sysfs read/write requires this lock.

> +
> +	return seq_list_start(&all_mddevs, *pos);
> +}
> +
> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> +{
> +	return seq_list_next(v, &all_mddevs, pos);
> +}
Can it be so simple? Why did previous versions take care of handling "(void *)1"
and "(void *)2" then? Could you elaborate?

> +
> +static void md_seq_stop(struct seq_file *seq, void *v)
> +{
> +	status_unused(seq);
> +	spin_unlock(&all_mddevs_lock);
> +}
> +
>  static const struct seq_operations md_seq_ops = {
>  	.start  = md_seq_start,
>  	.next   = md_seq_next,

Thanks,
Mariusz

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH -next] md: simplify md_seq_ops
  2023-09-11 14:05 ` Mariusz Tkaczyk
@ 2023-09-12  1:02   ` Yu Kuai
  2023-09-13 10:32     ` Mariusz Tkaczyk
  2023-09-22 21:22     ` Song Liu
  0 siblings, 2 replies; 6+ messages in thread
From: Yu Kuai @ 2023-09-12  1:02 UTC (permalink / raw)
  To: Mariusz Tkaczyk, Yu Kuai
  Cc: song, linux-raid, linux-kernel, yi.zhang, yangerkun, yukuai (C)

Hi,

在 2023/09/11 22:05, Mariusz Tkaczyk 写道:
> On Mon, 11 Sep 2023 14:50:10 +0800
> Yu Kuai <yukuai1@huaweicloud.com> wrote:
> 
>> From: Yu Kuai <yukuai3@huawei.com>
>>
>> Use seq_list_start/next/stop() directly. Move printing "Personalities"
>> to md_seq_start() and "unused devices" to md_seq_stop().
>>
>> Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
>> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
>> ---
>>   drivers/md/md.c | 124 ++++++++++++------------------------------------
>>   1 file changed, 31 insertions(+), 93 deletions(-)
>>
>> diff --git a/drivers/md/md.c b/drivers/md/md.c
>> index 0fe7ab6e8ab9..9c1155042335 100644
>> --- a/drivers/md/md.c
>> +++ b/drivers/md/md.c
>> @@ -8192,105 +8192,14 @@ static int status_resync(struct seq_file *seq,
>> struct mddev *mddev) return 1;
>>   }
>>   
>> -static void *md_seq_start(struct seq_file *seq, loff_t *pos)
>> -{
>> -	struct list_head *tmp;
>> -	loff_t l = *pos;
>> -	struct mddev *mddev;
>> -
>> -	if (l == 0x10000) {
>> -		++*pos;
>> -		return (void *)2;
>> -	}
>> -	if (l > 0x10000)
>> -		return NULL;
>> -	if (!l--)
>> -		/* header */
>> -		return (void*)1;
>> -
>> -	spin_lock(&all_mddevs_lock);
>> -	list_for_each(tmp,&all_mddevs)
>> -		if (!l--) {
>> -			mddev = list_entry(tmp, struct mddev, all_mddevs);
>> -			if (!mddev_get(mddev))
>> -				continue;
>> -			spin_unlock(&all_mddevs_lock);
>> -			return mddev;
>> -		}
>> -	spin_unlock(&all_mddevs_lock);
>> -	if (!l--)
>> -		return (void*)2;/* tail */
>> -	return NULL;
>> -}
>> -
>> -static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
>> -{
>> -	struct list_head *tmp;
>> -	struct mddev *next_mddev, *mddev = v;
>> -	struct mddev *to_put = NULL;
>> -
>> -	++*pos;
>> -	if (v == (void*)2)
>> -		return NULL;
>> -
>> -	spin_lock(&all_mddevs_lock);
>> -	if (v == (void*)1) {
>> -		tmp = all_mddevs.next;
>> -	} else {
>> -		to_put = mddev;
>> -		tmp = mddev->all_mddevs.next;
>> -	}
>> -
>> -	for (;;) {
>> -		if (tmp == &all_mddevs) {
>> -			next_mddev = (void*)2;
>> -			*pos = 0x10000;
>> -			break;
>> -		}
>> -		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
>> -		if (mddev_get(next_mddev))
>> -			break;
>> -		mddev = next_mddev;
>> -		tmp = mddev->all_mddevs.next;
>> -	}
>> -	spin_unlock(&all_mddevs_lock);
>> -
>> -	if (to_put)
>> -		mddev_put(mddev);
>> -	return next_mddev;
>> -
>> -}
>> -
>> -static void md_seq_stop(struct seq_file *seq, void *v)
>> -{
>> -	struct mddev *mddev = v;
>> -
>> -	if (mddev && v != (void*)1 && v != (void*)2)
>> -		mddev_put(mddev);
>> -}
>> -
>>   static int md_seq_show(struct seq_file *seq, void *v)
>>   {
>> -	struct mddev *mddev = v;
>> +	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
>>   	sector_t sectors;
>>   	struct md_rdev *rdev;
>>   
>> -	if (v == (void*)1) {
>> -		struct md_personality *pers;
>> -		seq_printf(seq, "Personalities : ");
>> -		spin_lock(&pers_lock);
>> -		list_for_each_entry(pers, &pers_list, list)
>> -			seq_printf(seq, "[%s] ", pers->name);
>> -
>> -		spin_unlock(&pers_lock);
>> -		seq_printf(seq, "\n");
>> -		seq->poll_event = atomic_read(&md_event_count);
>> -		return 0;
>> -	}
>> -	if (v == (void*)2) {
>> -		status_unused(seq);
>> +	if (test_bit(MD_DELETED, &mddev->flags))
>>   		return 0;
>> -	}
>>   
>>   	spin_lock(&mddev->lock);
>>   	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
>> @@ -8366,6 +8275,35 @@ static int md_seq_show(struct seq_file *seq, void *v)
>>   	return 0;
>>   }
>>   
>> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
>> +{
>> +	struct md_personality *pers;
>> +
>> +	seq_puts(seq, "Personalities : ");
>> +	spin_lock(&pers_lock);
>> +	list_for_each_entry(pers, &pers_list, list)
>> +		seq_printf(seq, "[%s] ", pers->name);
>> +
>> +	spin_unlock(&pers_lock);
>> +	seq_puts(seq, "\n");
>> +	seq->poll_event = atomic_read(&md_event_count);
>> +
>> +	spin_lock(&all_mddevs_lock);
> 
> I would prefer to increase "active" instead holding lock when enumerating over
> the devices. the main reason is that parsing mdstat is implemented in mdadm, so
> it could kind of blocker action- for example mdmon follows mdstat so it is read
> frequently. The time of getting other actions done can highly increase because
> every open or sysfs_read/write requires this lock.
> 
>> +
>> +	return seq_list_start(&all_mddevs, *pos);
>> +}
>> +
>> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
>> +{
>> +	return seq_list_next(v, &all_mddevs, pos);
>> +}
> Can it be so simple? Why did previous versions take care of handling "(void *)1"
> and "(void *)2" then? Could you elaborate?

"1" means printing "Personalities", which is now moved to md_seq_start,
and "2" means printing "unused devices", which is now moved to
md_seq_stop. And now md_seq_next is only used to iterate the mddev list.

Thanks,
Kuai

> 
>> +
>> +static void md_seq_stop(struct seq_file *seq, void *v)
>> +{
>> +	status_unused(seq);
>> +	spin_unlock(&all_mddevs_lock);
>> +}
>> +
>>   static const struct seq_operations md_seq_ops = {
>>   	.start  = md_seq_start,
>>   	.next   = md_seq_next,
> 
> Thanks,
> Mariusz
> 
> .
> 


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH -next] md: simplify md_seq_ops
  2023-09-12  1:02   ` Yu Kuai
@ 2023-09-13 10:32     ` Mariusz Tkaczyk
  2023-09-22 21:22     ` Song Liu
  1 sibling, 0 replies; 6+ messages in thread
From: Mariusz Tkaczyk @ 2023-09-13 10:32 UTC (permalink / raw)
  To: Yu Kuai; +Cc: song, linux-raid, linux-kernel, yi.zhang, yangerkun, yukuai (C)

On Tue, 12 Sep 2023 09:02:19 +0800
Yu Kuai <yukuai1@huaweicloud.com> wrote:

> >> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> >> +{
> >> +	return seq_list_next(v, &all_mddevs, pos);
> >> +}  
> > Can it be so simple? Why previous versions takes care of holding "(void)*1"
> > and "(void)*2" then? Could you elaborate?  
> 
> "1" means printing "Personalities", which is now moved to md_seq_start,
> and "2" means printing "unused devices", which is now moved to
> md_seq_stop. And now md_seq_next is only used to iterate the mddev list.
> 

Ok, LGTM.

Mariusz

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH -next] md: simplify md_seq_ops
  2023-09-12  1:02   ` Yu Kuai
  2023-09-13 10:32     ` Mariusz Tkaczyk
@ 2023-09-22 21:22     ` Song Liu
  2023-09-25  1:07       ` Yu Kuai
  1 sibling, 1 reply; 6+ messages in thread
From: Song Liu @ 2023-09-22 21:22 UTC (permalink / raw)
  To: Yu Kuai
  Cc: Mariusz Tkaczyk, linux-raid, linux-kernel, yi.zhang, yangerkun,
	yukuai (C)

On Mon, Sep 11, 2023 at 6:02 PM Yu Kuai <yukuai1@huaweicloud.com> wrote:
>
[...]
> >> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> >> +{
> >> +    struct md_personality *pers;
> >> +
> >> +    seq_puts(seq, "Personalities : ");
> >> +    spin_lock(&pers_lock);
> >> +    list_for_each_entry(pers, &pers_list, list)
> >> +            seq_printf(seq, "[%s] ", pers->name);
> >> +
> >> +    spin_unlock(&pers_lock);
> >> +    seq_puts(seq, "\n");
> >> +    seq->poll_event = atomic_read(&md_event_count);
> >> +
> >> +    spin_lock(&all_mddevs_lock);
> >
> > I would prefer to increase "active" instead holding lock when enumerating over
> > the devices. the main reason is that parsing mdstat is implemented in mdadm, so
> > it could kind of blocker action- for example mdmon follows mdstat so it is read
> > frequently. The time of getting other actions done can highly increase because
> > every open or sysfs_read/write requires this lock.

Existing code holds pers_lock and calls seq_printf() in md_seq_show(). Do we see
issues with this?

Hi Kuai,

This patch doesn't apply cleanly to md-next now. Please rebase and send v2.

Thanks,
Song

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH -next] md: simplify md_seq_ops
  2023-09-22 21:22     ` Song Liu
@ 2023-09-25  1:07       ` Yu Kuai
  0 siblings, 0 replies; 6+ messages in thread
From: Yu Kuai @ 2023-09-25  1:07 UTC (permalink / raw)
  To: Song Liu, Yu Kuai
  Cc: Mariusz Tkaczyk, linux-raid, linux-kernel, yi.zhang, yangerkun,
	yukuai (C)

Hi,

在 2023/09/23 5:22, Song Liu 写道:
> On Mon, Sep 11, 2023 at 6:02 PM Yu Kuai <yukuai1@huaweicloud.com> wrote:
>>
> [...]
>>>> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
>>>> +{
>>>> +    struct md_personality *pers;
>>>> +
>>>> +    seq_puts(seq, "Personalities : ");
>>>> +    spin_lock(&pers_lock);
>>>> +    list_for_each_entry(pers, &pers_list, list)
>>>> +            seq_printf(seq, "[%s] ", pers->name);
>>>> +
>>>> +    spin_unlock(&pers_lock);
>>>> +    seq_puts(seq, "\n");
>>>> +    seq->poll_event = atomic_read(&md_event_count);
>>>> +
>>>> +    spin_lock(&all_mddevs_lock);
>>>
>>> I would prefer to increase "active" instead holding lock when enumerating over
>>> the devices. the main reason is that parsing mdstat is implemented in mdadm, so
>>> it could kind of blocker action- for example mdmon follows mdstat so it is read
>>> frequently. The time of getting other actions done can highly increase because
>>> every open or sysfs_read/write requires this lock.
> 
> Existing code holds pers_lock and calls seq_printf() in md_seq_show(). Do we see
> issues with this?

before this patch, in each loop:
- hold lock, get mddev, drop lock
- md_seq_show

and after this patch:
- hold lock in start, drop lock in stop
- lock is always held in each loop

And mariusz is concerned that lock time is increased and may cause some
performance regression.

We've discussed in slack, and decided to keep this behaviour. I'll
update this in v2.

Thanks,
Kuai

> 
> Hi Kuai,
> 
> This patch doesn't apply cleanly to md-next now. Please rebase and send v2.
> 
> Thanks,
> Song
> .
> 


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2023-09-25  1:07 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-09-11  6:50 [PATCH -next] md: simplify md_seq_ops Yu Kuai
2023-09-11 14:05 ` Mariusz Tkaczyk
2023-09-12  1:02   ` Yu Kuai
2023-09-13 10:32     ` Mariusz Tkaczyk
2023-09-22 21:22     ` Song Liu
2023-09-25  1:07       ` Yu Kuai

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).