* [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
@ 2016-03-22 10:19 Mike Galbraith
2016-03-22 10:34 ` Mike Galbraith
2016-03-30 8:56 ` Sebastian Andrzej Siewior
0 siblings, 2 replies; 9+ messages in thread
From: Mike Galbraith @ 2016-03-22 10:19 UTC (permalink / raw)
To: Sebastian Andrzej Siewior, RT
I should probably just turn it off in distro config, but...
They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than adding an rtmutex per page.
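(For context: bit_spin_lock() is essentially a preempt_disable() plus a
busy-wait on a bit -- the sketch below is only an approximation, not the
real include/linux/bit_spinlock.h code -- so there is no priority
inheritance, and taking a sleeping -rt lock while holding one triggers
___might_sleep().)

	/* approximation of bit_spin_lock(), for illustration only */
	static inline void bit_spin_lock(int bitnum, unsigned long *addr)
	{
		preempt_disable();
		while (test_and_set_bit_lock(bitnum, addr)) {
			/* contended: spin with preemption briefly re-enabled */
			preempt_enable();
			do
				cpu_relax();
			while (test_bit(bitnum, addr));
			preempt_disable();
		}
	}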
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
---
drivers/block/zram/zram_drv.c | 29 ++++++++++++++++-------------
drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 13 deletions(-)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
goto out_error;
}
+ zram_meta_init_table_locks(meta, disksize);
+
return meta;
out_error:
@@ -568,12 +570,13 @@ static int zram_decompress_page(struct z
unsigned long handle;
size_t size;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
clear_page(mem);
return 0;
}
@@ -584,7 +587,7 @@ static int zram_decompress_page(struct z
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
@@ -604,14 +607,14 @@ static int zram_bvec_read(struct zram *z
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
handle_zero_page(bvec);
return 0;
}
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -689,10 +692,10 @@ static int zram_bvec_write(struct zram *
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.zero_pages);
ret = 0;
@@ -752,12 +755,12 @@ static int zram_bvec_write(struct zram *
* Free memory associated with this sector
* before overwriting unused sectors.
*/
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
meta->table[index].handle = handle;
zram_set_obj_size(meta, index, clen);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
@@ -800,9 +803,9 @@ static void zram_bio_discard(struct zram
}
while (n >= PAGE_SIZE) {
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
@@ -928,9 +931,9 @@ static void zram_slot_free_notify(struct
zram = bdev->bd_disk->private_data;
meta = zram->meta;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.notify_free);
}
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -72,6 +72,9 @@ enum zram_pageflags {
struct zram_table_entry {
unsigned long handle;
unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t lock;
+#endif
};
struct zram_stats {
@@ -119,4 +122,42 @@ struct zram {
*/
bool claim; /* Protected by bdev->bd_mutex */
};
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+ bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+ bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+ spin_lock(&table->lock);
+ __set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+ __clear_bit(ZRAM_ACCESS, &table->value);
+ spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+ size_t num_pages = disksize >> PAGE_SHIFT;
+ size_t index;
+
+ for (index = 0; index < num_pages; index++) {
+ spinlock_t *lock = &meta->table[index].lock;
+ spin_lock_init(lock);
+ }
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
#endif

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-22 10:19 [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt Mike Galbraith
@ 2016-03-22 10:34 ` Mike Galbraith
2016-03-30 8:56 ` Sebastian Andrzej Siewior
1 sibling, 0 replies; 9+ messages in thread
From: Mike Galbraith @ 2016-03-22 10:34 UTC (permalink / raw)
To: Sebastian Andrzej Siewior, RT

ltp says it works fine.. though I personally can't imagine ever wanting
to use the thing.

[ 1119.471828] zram: Added device: zram0
[ 1119.474630] zram0: detected capacity change from 0 to 536870912
[ 1120.027155] zram: 16356 (zram01) Attribute compr_data_size (and others) will be removed. See zram documentation.
[ 1120.808081] zram0: detected capacity change from 536870912 to 0
[ 1120.809625] zram: Removed device: zram0
[ 1120.833365] zram: Added device: zram0
[ 1120.833903] zram: Added device: zram1
[ 1120.834365] zram: Added device: zram2
[ 1120.835769] zram: Added device: zram3
[ 1120.846528] zram0: detected capacity change from 0 to 26214400
[ 1120.846898] zram1: detected capacity change from 0 to 26214400
[ 1120.847165] zram2: detected capacity change from 0 to 26214400
[ 1120.847352] zram3: detected capacity change from 0 to 26214400
[ 1120.872719] BTRFS: device fsid 65d42bcc-8185-47d2-984f-64e097dd3356 devid 1 transid 3 /dev/zram3
[ 1120.875637] EXT4-fs (zram0): mounting ext3 file system using the ext4 subsystem
[ 1120.875825] EXT4-fs (zram0): mounted filesystem with ordered data mode. Opts: (null)
[ 1120.878829] EXT4-fs (zram1): mounted filesystem with ordered data mode. Opts: (null)
[ 1120.881819] XFS (zram2): Mounting V4 Filesystem
[ 1120.883469] XFS (zram2): Ending clean mount
[ 1120.886281] BTRFS info (device zram3): disk space caching is enabled
[ 1120.886285] BTRFS: has skinny extents
[ 1120.886985] BTRFS: detected SSD devices, enabling SSD mode
[ 1120.887022] BTRFS: creating UUID tree
[ 1159.016910] XFS (zram2): Unmounting Filesystem
[ 1159.060865] zram0: detected capacity change from 26214400 to 0
[ 1159.060976] zram1: detected capacity change from 26214400 to 0
[ 1159.061155] zram2: detected capacity change from 26214400 to 0
[ 1159.061263] zram3: detected capacity change from 26214400 to 0
[ 1159.061776] zram: Removed device: zram0
[ 1159.079823] zram: Removed device: zram1
[ 1159.094944] zram: Removed device: zram2
[ 1159.110827] zram: Removed device: zram3
[ 1159.143375] zram: Added device: zram0
[ 1159.543950] zram0: detected capacity change from 0 to 107374182400
[ 1159.551720] Adding 104857596k swap on /dev/zram0.  Priority:-1 extents:1 across:104857596k SSFS
[ 1159.742644] zram0: detected capacity change from 107374182400 to 0
[ 1159.743304] zram: Removed device: zram0
[ 1159.771532] zram: Added device: zram0
[ 1159.773866] zram0: detected capacity change from 0 to 536870912
[ 1160.334486] zram: 28112 (zram03) Attribute compr_data_size (and others) will be removed. See zram documentation.
[ 1161.129510] zram0: detected capacity change from 536870912 to 0
[ 1161.131034] zram: Removed device: zram0

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-22 10:19 [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt Mike Galbraith
2016-03-22 10:34 ` Mike Galbraith
@ 2016-03-30 8:56 ` Sebastian Andrzej Siewior
2016-03-30 9:13 ` Sergey Senozhatsky
2016-03-30 9:23 ` Mike Galbraith
1 sibling, 2 replies; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-03-30 8:56 UTC (permalink / raw)
To: Mike Galbraith, Minchan Kim, Nitin Gupta, Sergey Senozhatsky; +Cc: RT

* Mike Galbraith | 2016-03-22 11:19:39 [+0100]:

>--- a/drivers/block/zram/zram_drv.c
>+++ b/drivers/block/zram/zram_drv.c
>@@ -568,12 +570,13 @@ static int zram_decompress_page(struct z
> 	unsigned long handle;
> 	size_t size;
>
>-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
>+	zram_lock_table(&meta->table[index]);
> 	handle = meta->table[index].handle;
> 	size = zram_get_obj_size(meta, index);
>
> 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>+		zram_unlock_table(&meta->table[index]);

shouldn't you remove that ZRAM_ACCESS lock here?

> 		clear_page(mem);
> 		return 0;
> 	}

>--- a/drivers/block/zram/zram_drv.h
>+++ b/drivers/block/zram/zram_drv.h
>@@ -72,6 +72,9 @@ enum zram_pageflags {
> struct zram_table_entry {
> 	unsigned long handle;
> 	unsigned long value;
>+#ifdef CONFIG_PREEMPT_RT_BASE
>+	spinlock_t lock;
>+#endif
> };
>
> struct zram_stats {
>@@ -119,4 +122,42 @@ struct zram {
> 	 */
> 	bool claim; /* Protected by bdev->bd_mutex */
> };
>+
>+#ifndef CONFIG_PREEMPT_RT_BASE
>+static inline void zram_lock_table(struct zram_table_entry *table)
>+{
>+	bit_spin_lock(ZRAM_ACCESS, &table->value);
>+}
>+
>+static inline void zram_unlock_table(struct zram_table_entry *table)
>+{
>+	bit_spin_unlock(ZRAM_ACCESS, &table->value);
>+}
>+
>+static inline void zram_meta_init_locks(struct zram_meta *meta, u64 disksize) { }
>+#else /* CONFIG_PREEMPT_RT_BASE */
>+static inline void zram_lock_table(struct zram_table_entry *table)
>+{
>+	spin_lock(&table->lock);
>+	__set_bit(ZRAM_ACCESS, &table->value);
>+}
>+
>+static inline void zram_unlock_table(struct zram_table_entry *table)
>+{
>+	__clear_bit(ZRAM_ACCESS, &table->value);
>+	spin_unlock(&table->lock);
>+}

ZRAM_ACCESS is the only bit used for locking. ZRAM_ZERO is the only flag
set / tested.
Would it be possible to make value u32 and add a spinlock? value does not
need to be 64bit on 64bit systems, and it uses only the first 23 bits for
the size and bits 24+25 for the two flags we have now. So the size should
not change on 64bit systems and only increase by four bytes on 32bit
systems. That is without the lock debugging of course.

Minchan, Nitin, Sergey, do you see any reason not to do so?

>+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
>+{
>+	size_t num_pages = disksize >> PAGE_SHIFT;
>+	size_t index;
>+
>+	for (index = 0; index < num_pages; index++) {
>+		spinlock_t *lock = &meta->table[index].lock;
>+		spin_lock_init(lock);
>+	}
>+}
>+#endif /* CONFIG_PREEMPT_RT_BASE */
>+
> #endif

Sebastian
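
In concrete terms, the layout Sebastian describes above would look
something like this -- an untested sketch, just to show the packing and
the size arithmetic without lock debugging:

	/* illustrative only, not a tested patch */
	struct zram_table_entry {
		unsigned long handle;	/* 8 bytes on 64bit, 4 on 32bit */
		u32 value;		/* object size in the low bits + ZRAM_ZERO flag */
		spinlock_t lock;	/* 4 bytes without lock debugging */
	};

	/*
	 * Without lock debugging: 8 + 4 + 4 = 16 bytes on 64bit, the same as
	 * today's unsigned long handle + unsigned long value; 4 + 4 + 4 = 12
	 * bytes on 32bit, i.e. four bytes more than today.
	 */

With that, zram_lock_table()/zram_unlock_table() would presumably just
take the spinlock on all configs, and the ZRAM_ACCESS bit could go away
entirely.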

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-30 8:56 ` Sebastian Andrzej Siewior
@ 2016-03-30 9:13 ` Sergey Senozhatsky
2016-03-30 9:19 ` Sebastian Andrzej Siewior
0 siblings, 1 reply; 9+ messages in thread
From: Sergey Senozhatsky @ 2016-03-30 9:13 UTC (permalink / raw)
To: Sebastian Andrzej Siewior
Cc: Mike Galbraith, Minchan Kim, Nitin Gupta, Sergey Senozhatsky, RT

On (03/30/16 10:56), Sebastian Andrzej Siewior wrote:
[..]
> >+static inline void zram_unlock_table(struct zram_table_entry *table)
> >+{
> >+	__clear_bit(ZRAM_ACCESS, &table->value);
> >+	spin_unlock(&table->lock);
> >+}
>
> ZRAM_ACCESS is the only bit used for locking. ZRAM_ZERO is the only flag
> set / tested.
> Would it be possible to make value u32 and add a spinlock? value does not
> need to be 64bit on 64bit systems, and it uses only the first 23 bits for
> the size and bits 24+25 for the two flags we have now. So the size should
> not change on 64bit systems and only increase by four bytes on 32bit
> systems. That is without the lock debugging of course.
>
> Minchan, Nitin, Sergey, do you see any reason not to do so?

that's increased size for every table entry + lock debugging bloat;
not exactly what zram is trying to do. sounds bad enough.

	-ss

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-30 9:13 ` Sergey Senozhatsky
@ 2016-03-30 9:19 ` Sebastian Andrzej Siewior
0 siblings, 0 replies; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-03-30 9:19 UTC (permalink / raw)
To: Sergey Senozhatsky; +Cc: Mike Galbraith, Minchan Kim, Nitin Gupta, RT

On 03/30/2016 11:13 AM, Sergey Senozhatsky wrote:
> On (03/30/16 10:56), Sebastian Andrzej Siewior wrote:
> [..]
>>> +static inline void zram_unlock_table(struct zram_table_entry *table)
>>> +{
>>> +	__clear_bit(ZRAM_ACCESS, &table->value);
>>> +	spin_unlock(&table->lock);
>>> +}
>>
>> ZRAM_ACCESS is the only bit used for locking. ZRAM_ZERO is the only flag
>> set / tested.
>> Would it be possible to make value u32 and add a spinlock? value does not
>> need to be 64bit on 64bit systems, and it uses only the first 23 bits for
>> the size and bits 24+25 for the two flags we have now. So the size should
>> not change on 64bit systems and only increase by four bytes on 32bit
>> systems. That is without the lock debugging of course.
>>
>> Minchan, Nitin, Sergey, do you see any reason not to do so?
>
> that's increased size for every table entry + lock debugging bloat;
> not exactly what zram is trying to do. sounds bad enough.

That size is _only_ increased with lockdep enabled. So if you are that
concerned about the size of this struct, why not disable lockdep?

>
> -ss
>

Sebastian

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-30 8:56 ` Sebastian Andrzej Siewior
2016-03-30 9:13 ` Sergey Senozhatsky
@ 2016-03-30 9:23 ` Mike Galbraith
2016-03-31 2:08 ` Mike Galbraith
1 sibling, 1 reply; 9+ messages in thread
From: Mike Galbraith @ 2016-03-30 9:23 UTC (permalink / raw)
To: Sebastian Andrzej Siewior, Minchan Kim, Nitin Gupta, Sergey Senozhatsky; +Cc: RT

On Wed, 2016-03-30 at 10:56 +0200, Sebastian Andrzej Siewior wrote:
> * Mike Galbraith | 2016-03-22 11:19:39 [+0100]:
>
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -568,12 +570,13 @@ static int zram_decompress_page(struct z
> >  	unsigned long handle;
> >  	size_t size;
> >
> > -	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> > +	zram_lock_table(&meta->table[index]);
> >  	handle = meta->table[index].handle;
> >  	size = zram_get_obj_size(meta, index);
> >
> >  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> >  		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > +		zram_unlock_table(&meta->table[index]);
>
> shouldn't you remove that ZRAM_ACCESS lock here?

Oops, yup.

> ZRAM_ACCESS is the only bit used for locking. ZRAM_ZERO is the only flag
> set / tested.
> Would it be possible to make value u32 and add a spinlock? value does not
> need to be 64bit on 64bit systems, and it uses only the first 23 bits for
> the size and bits 24+25 for the two flags we have now. So the size should
> not change on 64bit systems and only increase by four bytes on 32bit
> systems. That is without the lock debugging of course.

I started going the raw locks route for the allocator instead of
whacking the bit spinlocks, given what it is, but chickened out when I
saw compression, and started thinking of just disabling it for rt
instead.  I flipped back, figuring a bloated zram gizmo is better than
no gizmo at all :-/

	-Mike

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-30 9:23 ` Mike Galbraith
@ 2016-03-31 2:08 ` Mike Galbraith
2016-04-01 10:42 ` Sebastian Andrzej Siewior
0 siblings, 1 reply; 9+ messages in thread
From: Mike Galbraith @ 2016-03-31 2:08 UTC (permalink / raw)
To: Sebastian Andrzej Siewior, Minchan Kim, Nitin Gupta, Sergey Senozhatsky; +Cc: RT

On Wed, 2016-03-30 at 11:23 +0200, Mike Galbraith wrote:
> On Wed, 2016-03-30 at 10:56 +0200, Sebastian Andrzej Siewior wrote:
> > * Mike Galbraith | 2016-03-22 11:19:39 [+0100]:
> >
> > > --- a/drivers/block/zram/zram_drv.c
> > > +++ b/drivers/block/zram/zram_drv.c
> > > @@ -568,12 +570,13 @@ static int zram_decompress_page(struct z
> > >  	unsigned long handle;
> > >  	size_t size;
> > >
> > > -	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> > > +	zram_lock_table(&meta->table[index]);
> > >  	handle = meta->table[index].handle;
> > >  	size = zram_get_obj_size(meta, index);
> > >
> > >  	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
> > >  		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > > +		zram_unlock_table(&meta->table[index]);
> >
> > shouldn't you remove that ZRAM_ACCESS lock here?
>
> Oops, yup.
>
> > ZRAM_ACCESS is the only bit used for locking. ZRAM_ZERO is the only flag
> > set / tested.
> > Would it be possible to make value u32 and add a spinlock? value does not
> > need to be 64bit on 64bit systems, and it uses only the first 23 bits for
> > the size and bits 24+25 for the two flags we have now. So the size should
> > not change on 64bit systems and only increase by four bytes on 32bit
> > systems. That is without the lock debugging of course.
>
> I started going the raw locks route for the allocator instead of
> whacking the bit spinlocks, given what it is, but chickened out when I
> saw compression, and started thinking of just disabling it for rt
> instead.  I flipped back, figuring a bloated zram gizmo is better than
> no gizmo at all :-/

(fixed the missed bit spinlock whackage you pointed out)

drivers/block/zram: Replace bit spinlocks with rtmutex for -rt

They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
---
 drivers/block/zram/zram_drv.c |   30 ++++++++++++++++--------------
 drivers/block/zram/zram_drv.h |   41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+), 14 deletions(-)

--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
 		goto out_error;
 	}

+	zram_meta_init_table_locks(meta, disksize);
+
 	return meta;

 out_error:
@@ -568,12 +570,12 @@ static int zram_decompress_page(struct z
 	unsigned long handle;
 	size_t size;

-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	handle = meta->table[index].handle;
 	size = zram_get_obj_size(meta, index);

 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		clear_page(mem);
 		return 0;
 	}
@@ -584,7 +586,7 @@ static int zram_decompress_page(struct z
 	else
 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);

 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *z
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;

-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);

 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *
 		if (user_mem)
 			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);

 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);

 	meta->table[index].handle = handle;
 	zram_set_obj_size(meta, index, clen);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);

 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram
 	}

 	while (n >= PAGE_SIZE) {
-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_lock_table(&meta->table[index]);
 		zram_free_page(zram, index);
-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+		zram_unlock_table(&meta->table[index]);
 		atomic64_inc(&zram->stats.notify_free);
 		index++;
 		n -= PAGE_SIZE;
@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;

-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_lock_table(&meta->table[index]);
 	zram_free_page(zram, index);
-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+	zram_unlock_table(&meta->table[index]);
 	atomic64_inc(&zram->stats.notify_free);
 }
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -72,6 +72,9 @@ enum zram_pageflags {
 struct zram_table_entry {
 	unsigned long handle;
 	unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t lock;
+#endif
 };

 struct zram_stats {
@@ -119,4 +122,42 @@ struct zram {
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
 };
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+	spin_lock(&table->lock);
+	__set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+	__clear_bit(ZRAM_ACCESS, &table->value);
+	spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+	size_t num_pages = disksize >> PAGE_SHIFT;
+	size_t index;
+
+	for (index = 0; index < num_pages; index++) {
+		spinlock_t *lock = &meta->table[index].lock;
+		spin_lock_init(lock);
+	}
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
 #endif

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-03-31 2:08 ` Mike Galbraith
@ 2016-04-01 10:42 ` Sebastian Andrzej Siewior
2016-04-04 0:27 ` Sergey Senozhatsky
0 siblings, 1 reply; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2016-04-01 10:42 UTC (permalink / raw)
To: Mike Galbraith; +Cc: Minchan Kim, Nitin Gupta, Sergey Senozhatsky, RT

* Mike Galbraith | 2016-03-31 04:08:28 [+0200]:

>(fixed the missed bit spinlock whackage you pointed out)

thanks.

>drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
>
>They're nondeterministic, and lead to ___might_sleep() splats in -rt.
>OTOH, they're a lot less wasteful than an rtmutex per page.

Applied.
Although I'm disappointed about Sergey's argument against the spinlock in
general due to a few bytes used by lockdep, which can be disabled. Not to
mention the useful debug facility lockdep provides if one decides to use
it.

Sebastian

* Re: [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt
2016-04-01 10:42 ` Sebastian Andrzej Siewior
@ 2016-04-04 0:27 ` Sergey Senozhatsky
0 siblings, 0 replies; 9+ messages in thread
From: Sergey Senozhatsky @ 2016-04-04 0:27 UTC (permalink / raw)
To: Sebastian Andrzej Siewior
Cc: Mike Galbraith, Minchan Kim, Nitin Gupta, Sergey Senozhatsky, RT

On (04/01/16 12:42), Sebastian Andrzej Siewior wrote:
> >They're nondeterministic, and lead to ___might_sleep() splats in -rt.
> >OTOH, they're a lot less wasteful than an rtmutex per page.
> Applied.
> Although I'm disappointed about Sergey's argument against the spinlock in
> general due to a few bytes used by lockdep, which can be disabled. Not to
> mention the useful debug facility lockdep provides if one decides to use
> it.

I'll take a look.

	-ss

end of thread

Thread overview: 9+ messages
2016-03-22 10:19 [rfc patch] drivers/block/zram: Replace bit spinlocks with rtmutex for -rt Mike Galbraith
2016-03-22 10:34 ` Mike Galbraith
2016-03-30 8:56 ` Sebastian Andrzej Siewior
2016-03-30 9:13 ` Sergey Senozhatsky
2016-03-30 9:19 ` Sebastian Andrzej Siewior
2016-03-30 9:23 ` Mike Galbraith
2016-03-31 2:08 ` Mike Galbraith
2016-04-01 10:42 ` Sebastian Andrzej Siewior
2016-04-04 0:27 ` Sergey Senozhatsky