* [PATCH 01/19] quota: protect getfmt call with dqonoff_mutex lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-28 14:27 ` Christoph Hellwig
2010-10-22 17:34 ` [PATCH 02/19] quota: Wrap common expression to helper function Dmitry Monakhov
` (17 subsequent siblings)
18 siblings, 1 reply; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
dqptr_sem doesn't have anything in common with quota files;
quota file loading is protected by dqonoff_mutex, so we have to use
it when reading the fmt info.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/quota.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b34bdb2..ce8db30 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -79,13 +79,13 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
- down_read(&sb_dqopt(sb)->dqptr_sem);
+ mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
- up_read(&sb_dqopt(sb)->dqptr_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return -ESRCH;
}
fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
- up_read(&sb_dqopt(sb)->dqptr_sem);
+ mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
if (copy_to_user(addr, &fmt, sizeof(fmt)))
return -EFAULT;
return 0;
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 02/19] quota: Wrap common expression to helper function
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 01/19] quota: protect getfmt call with dqonoff_mutex lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 03/19] quota: mode quota internals from sb to quota_info Dmitry Monakhov
` (16 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
- rename sb_dqopt(sb) to dqopts(sb): returns the quota_info structure of the sb
- add new sb_dqopts(dquot): returns the quota_info structure of the sb the
dquot belongs to.
This helps make the code more readable.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ext2/super.c | 4 +-
fs/ext3/super.c | 8 ++--
fs/ext4/super.c | 8 ++--
fs/gfs2/ops_fstype.c | 2 +-
fs/jfs/super.c | 4 +-
fs/ocfs2/quota_global.c | 12 +++---
fs/ocfs2/quota_local.c | 34 +++++++-------
fs/ocfs2/super.c | 6 +-
fs/quota/dquot.c | 106 +++++++++++++++++++++++-----------------------
fs/quota/quota_tree.c | 2 +-
fs/quota/quota_v1.c | 14 +++---
fs/reiserfs/super.c | 6 +-
include/linux/quota.h | 1 +
include/linux/quotaops.h | 14 ++++--
14 files changed, 113 insertions(+), 108 deletions(-)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1ec6026..7727491 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1371,7 +1371,7 @@ static int ext2_get_sb(struct file_system_type *fs_type,
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
@@ -1416,7 +1416,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
static ssize_t ext2_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e9fd676..9740ca2 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1529,7 +1529,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
+ if (dqopts(sb)->files[i])
dquot_quota_off(sb, i);
}
#endif
@@ -2787,7 +2787,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
- return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+ return sb_dqopts(dquot)->files[dquot->dq_type];
}
static int ext3_write_dquot(struct dquot *dquot)
@@ -2930,7 +2930,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
@@ -2968,7 +2968,7 @@ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bcf86b3..e59eb37 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2115,7 +2115,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
+ if (dqopts(sb)->files[i])
dquot_quota_off(sb, i);
}
#endif
@@ -3968,7 +3968,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
- return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+ return sb_dqopts(dquot)->files[dquot->dq_type];
}
static int ext4_write_dquot(struct dquot *dquot)
@@ -4124,7 +4124,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
@@ -4162,7 +4162,7 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4d4b1e8..1e52207 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1168,7 +1168,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers;
sb->s_qcop = &gfs2_quotactl_ops;
- sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+ dqopts(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index ec8c3e4..b612adf 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -655,7 +655,7 @@ static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0;
int offset = off & (sb->s_blocksize - 1);
@@ -700,7 +700,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
static ssize_t jfs_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
sector_t blk = off >> sb->s_blocksize_bits;
int err = 0;
int offset = off & (sb->s_blocksize - 1);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 4607923..cdae8d1 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -610,7 +610,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
mlog_errno(status);
goto out_ilock;
}
- mutex_lock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_lock(&dqopts(sb)->dqio_mutex);
status = ocfs2_sync_dquot(dquot);
if (status < 0)
mlog_errno(status);
@@ -618,7 +618,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
status = ocfs2_local_write_dquot(dquot);
if (status < 0)
mlog_errno(status);
- mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_unlock(&dqopts(sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
@@ -657,9 +657,9 @@ static int ocfs2_write_dquot(struct dquot *dquot)
mlog_errno(status);
goto out;
}
- mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
+ mutex_lock(&sb_dqopts(dquot)->dqio_mutex);
status = ocfs2_local_write_dquot(dquot);
- mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
+ mutex_unlock(&sb_dqopts(dquot)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out:
mlog_exit(status);
@@ -854,7 +854,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
mlog_errno(status);
goto out_ilock;
}
- mutex_lock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_lock(&dqopts(sb)->dqio_mutex);
status = ocfs2_sync_dquot(dquot);
if (status < 0) {
mlog_errno(status);
@@ -863,7 +863,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
/* Now write updated local dquot structure */
status = ocfs2_local_write_dquot(dquot);
out_dlock:
- mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_unlock(&dqopts(sb)->dqio_mutex);
ocfs2_commit_trans(osb, handle);
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc78764..056cb24 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -173,7 +173,7 @@ static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
GROUP_QUOTA_SYSTEM_INODE };
struct buffer_head *bh = NULL;
- struct inode *linode = sb_dqopt(sb)->files[type];
+ struct inode *linode = dqopts(sb)->files[type];
struct inode *ginode = NULL;
struct ocfs2_disk_dqheader *dqhead;
int status, ret = 0;
@@ -522,7 +522,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
mlog_errno(status);
goto out_drop_lock;
}
- mutex_lock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_lock(&dqopts(sb)->dqio_mutex);
spin_lock(&dq_data_lock);
/* Add usage from quota entry into quota changes
* of our node. Auxiliary variables are important
@@ -555,7 +555,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
out_commit:
- mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_unlock(&dqopts(sb)->dqio_mutex);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_drop_lock:
ocfs2_unlock_global_qf(oinfo, 1);
@@ -596,7 +596,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
unsigned int flags;
mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopts(sb)->dqonoff_mutex);
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
@@ -672,7 +672,7 @@ out_put:
break;
}
out:
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
kfree(rec);
return status;
}
@@ -683,7 +683,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
struct ocfs2_local_disk_dqinfo *ldinfo;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo;
- struct inode *lqinode = sb_dqopt(sb)->files[type];
+ struct inode *lqinode = dqopts(sb)->files[type];
int status;
struct buffer_head *bh = NULL;
struct ocfs2_quota_recovery *rec;
@@ -691,7 +691,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
/* We don't need the lock and we have to acquire quota file locks
* which will later depend on this lock */
- mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_unlock(&dqopts(sb)->dqio_mutex);
info->dqi_maxblimit = 0x7fffffffffffffffLL;
info->dqi_maxilimit = 0x7fffffffffffffffLL;
oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
@@ -770,7 +770,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
goto out_err;
}
- mutex_lock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_lock(&dqopts(sb)->dqio_mutex);
return 0;
out_err:
if (oinfo) {
@@ -784,7 +784,7 @@ out_err:
kfree(oinfo);
}
brelse(bh);
- mutex_lock(&sb_dqopt(sb)->dqio_mutex);
+ mutex_lock(&dqopts(sb)->dqio_mutex);
return -1;
}
@@ -796,7 +796,7 @@ static int ocfs2_local_write_info(struct super_block *sb, int type)
->dqi_libh;
int status;
- status = ocfs2_modify_bh(sb_dqopt(sb)->files[type], bh, olq_update_info,
+ status = ocfs2_modify_bh(dqopts(sb)->files[type], bh, olq_update_info,
info);
if (status < 0) {
mlog_errno(status);
@@ -849,7 +849,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
/* Mark local file as clean */
info->dqi_flags |= OLQF_CLEAN;
- status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
+ status = ocfs2_modify_bh(dqopts(sb)->files[type],
oinfo->dqi_libh,
olq_update_info,
info);
@@ -859,7 +859,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
}
out:
- ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1);
+ ocfs2_inode_unlock(dqopts(sb)->files[type], 1);
brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
@@ -893,7 +893,7 @@ int ocfs2_local_write_dquot(struct dquot *dquot)
struct super_block *sb = dquot->dq_sb;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
struct buffer_head *bh;
- struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
+ struct inode *lqinode = dqopts(sb)->files[dquot->dq_type];
int status;
status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
@@ -962,7 +962,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
{
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
- struct inode *lqinode = sb_dqopt(sb)->files[type];
+ struct inode *lqinode = dqopts(sb)->files[type];
struct ocfs2_quota_chunk *chunk = NULL;
struct ocfs2_local_disk_chunk *dchunk;
int status;
@@ -1094,7 +1094,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_quota_chunk *chunk;
- struct inode *lqinode = sb_dqopt(sb)->files[type];
+ struct inode *lqinode = dqopts(sb)->files[type];
struct ocfs2_local_disk_chunk *dchunk;
int epb = ol_quota_entries_per_block(sb);
unsigned int chunk_blocks;
@@ -1215,7 +1215,7 @@ int ocfs2_create_local_dquot(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
int type = dquot->dq_type;
- struct inode *lqinode = sb_dqopt(sb)->files[type];
+ struct inode *lqinode = dqopts(sb)->files[type];
struct ocfs2_quota_chunk *chunk;
struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
int offset;
@@ -1275,7 +1275,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
int offset;
status = ocfs2_journal_access_dq(handle,
- INODE_CACHE(sb_dqopt(sb)->files[type]),
+ INODE_CACHE(dqopts(sb)->files[type]),
od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26bd015..dc5d1ab 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -907,7 +907,7 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
int status;
int type;
- sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
+ dqopts(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
for (type = 0; type < MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
@@ -949,7 +949,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
/* Cancel periodic syncing before we grab dqonoff_mutex */
oinfo = sb_dqinfo(sb, type)->dqi_priv;
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
- inode = igrab(sb->s_dquot.files[type]);
+ inode = igrab(dqopts(sb)->files[type]);
/* Turn off quotas. This will remove all dquot structures from
* memory and so they will be automatically synced to global
* quota files */
@@ -970,7 +970,7 @@ static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
return -EINVAL;
- return dquot_enable(sb_dqopt(sb)->files[type], type,
+ return dquot_enable(dqopts(sb)->files[type], type,
format_id, DQUOT_LIMITS_ENABLED);
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 06157aa..fe45466 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -346,7 +346,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
spin_lock(&dq_list_lock);
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
- list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
+ list_add(&dquot->dq_dirty, &sb_dqopts(dquot)->
info[dquot->dq_type].dqi_dirty_list);
ret = 0;
}
@@ -390,7 +390,7 @@ static inline int clear_dquot_dirty(struct dquot *dquot)
void mark_info_dirty(struct super_block *sb, int type)
{
- set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
+ set_bit(DQF_INFO_DIRTY_B, &dqopts(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);
@@ -401,7 +401,7 @@ EXPORT_SYMBOL(mark_info_dirty);
int dquot_acquire(struct dquot *dquot)
{
int ret = 0, ret2 = 0;
- struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ struct quota_info *dqopt = sb_dqopts(dquot);
mutex_lock(&dquot->dq_lock);
mutex_lock(&dqopt->dqio_mutex);
@@ -439,7 +439,7 @@ EXPORT_SYMBOL(dquot_acquire);
int dquot_commit(struct dquot *dquot)
{
int ret = 0, ret2 = 0;
- struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ struct quota_info *dqopt = sb_dqopts(dquot);
mutex_lock(&dqopt->dqio_mutex);
spin_lock(&dq_list_lock);
@@ -471,7 +471,7 @@ EXPORT_SYMBOL(dquot_commit);
int dquot_release(struct dquot *dquot)
{
int ret = 0, ret2 = 0;
- struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ struct quota_info *dqopt = sb_dqopts(dquot);
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
@@ -568,7 +568,7 @@ int dquot_scan_active(struct super_block *sb,
struct dquot *dquot, *old_dquot = NULL;
int ret = 0;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopts(sb)->dqonoff_mutex);
spin_lock(&dq_list_lock);
list_for_each_entry(dquot, &inuse_list, dq_inuse) {
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
@@ -591,7 +591,7 @@ int dquot_scan_active(struct super_block *sb,
spin_unlock(&dq_list_lock);
out:
dqput(old_dquot);
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
@@ -600,7 +600,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
{
struct list_head *dirty;
struct dquot *dquot;
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
int cnt;
mutex_lock(&dqopt->dqonoff_mutex);
@@ -639,7 +639,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
- if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+ if (!wait || (dqopts(sb)->flags & DQUOT_QUOTA_SYS_FILE))
return 0;
/* This is not very clever (and fast) but currently I don't know about
@@ -653,18 +653,18 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopts(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
+ mutex_lock_nested(&dqopts(sb)->files[cnt]->i_mutex,
I_MUTEX_QUOTA);
- truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
- mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+ truncate_inode_pages(&dqopts(sb)->files[cnt]->i_data, 0);
+ mutex_unlock(&dqopts(sb)->files[cnt]->i_mutex);
}
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
return 0;
}
@@ -1033,9 +1033,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
LIST_HEAD(tofree_head);
if (sb->dq_op) {
- down_write(&sb_dqopt(sb)->dqptr_sem);
+ down_write(&dqopts(sb)->dqptr_sem);
remove_dquot_ref(sb, type, &tofree_head);
- up_write(&sb_dqopt(sb)->dqptr_sem);
+ up_write(&dqopts(sb)->dqptr_sem);
put_dquot_list(&tofree_head);
}
}
@@ -1081,7 +1081,7 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
- if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+ if (sb_dqopts(dquot)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
dquot->dq_dqb.dqb_curinodes -= number;
else
@@ -1093,7 +1093,7 @@ static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
- if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+ if (sb_dqopts(dquot)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
dquot->dq_dqb.dqb_curspace -= number;
else
@@ -1203,7 +1203,7 @@ static void flush_warnings(struct dquot *const *dquots, char *warntype)
static int ignore_hardlimit(struct dquot *dquot)
{
- struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+ struct mem_dqinfo *info = &sb_dqopts(dquot)->info[dquot->dq_type];
return capable(CAP_SYS_RESOURCE) &&
(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
@@ -1241,7 +1241,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
dquot->dq_dqb.dqb_itime == 0) {
*warntype = QUOTA_NL_ISOFTWARN;
dquot->dq_dqb.dqb_itime = get_seconds() +
- sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+ sb_dqopts(dquot)->info[dquot->dq_type].dqi_igrace;
}
return 0;
@@ -1285,7 +1285,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
if (!prealloc) {
*warntype = QUOTA_NL_BSOFTWARN;
dquot->dq_dqb.dqb_btime = get_seconds() +
- sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
+ dqopts(sb)->info[dquot->dq_type].dqi_bgrace;
}
else
/*
@@ -1377,7 +1377,7 @@ static void __dquot_initialize(struct inode *inode, int type)
got[cnt] = dqget(sb, id, cnt);
}
- down_write(&sb_dqopt(sb)->dqptr_sem);
+ down_write(&dqopts(sb)->dqptr_sem);
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1402,7 +1402,7 @@ static void __dquot_initialize(struct inode *inode, int type)
}
}
out_err:
- up_write(&sb_dqopt(sb)->dqptr_sem);
+ up_write(&dqopts(sb)->dqptr_sem);
/* Drop unused references */
dqput_all(got);
}
@@ -1421,12 +1421,12 @@ static void __dquot_drop(struct inode *inode)
int cnt;
struct dquot *put[MAXQUOTAS];
- down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_write(&dqopts(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = inode->i_dquot[cnt];
inode->i_dquot[cnt] = NULL;
}
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_write(&dqopts(inode->i_sb)->dqptr_sem);
dqput_all(put);
}
@@ -1550,7 +1550,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
goto out;
}
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_read(&dqopts(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1581,7 +1581,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_read(&dqopts(inode->i_sb)->dqptr_sem);
out:
return ret;
}
@@ -1601,7 +1601,7 @@ int dquot_alloc_inode(const struct inode *inode)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_read(&dqopts(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1622,7 +1622,7 @@ warn_put_all:
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_read(&dqopts(inode->i_sb)->dqptr_sem);
return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1639,7 +1639,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
return 0;
}
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_read(&dqopts(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1651,7 +1651,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
inode_claim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_read(&dqopts(inode->i_sb)->dqptr_sem);
return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1672,7 +1672,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
return;
}
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_read(&dqopts(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1691,7 +1691,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
mark_all_dquot_dirty(inode->i_dquot);
out_unlock:
flush_warnings(inode->i_dquot, warntype);
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_read(&dqopts(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(__dquot_free_space);
@@ -1708,7 +1708,7 @@ void dquot_free_inode(const struct inode *inode)
if (!dquot_active(inode))
return;
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_read(&dqopts(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1719,7 +1719,7 @@ void dquot_free_inode(const struct inode *inode)
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_read(&dqopts(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(dquot_free_inode);
@@ -1750,9 +1750,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype_to[cnt] = QUOTA_NL_NOWARN;
- down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ down_write(&dqopts(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_write(&dqopts(inode->i_sb)->dqptr_sem);
return 0;
}
spin_lock(&dq_data_lock);
@@ -1804,7 +1804,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
inode->i_dquot[cnt] = transfer_to[cnt];
}
spin_unlock(&dq_data_lock);
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_write(&dqopts(inode->i_sb)->dqptr_sem);
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
@@ -1818,7 +1818,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
return 0;
over_quota:
spin_unlock(&dq_data_lock);
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_write(&dqopts(inode->i_sb)->dqptr_sem);
flush_warnings(transfer_to, warntype_to);
return ret;
}
@@ -1853,7 +1853,7 @@ EXPORT_SYMBOL(dquot_transfer);
int dquot_commit_info(struct super_block *sb, int type)
{
int ret;
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
mutex_lock(&dqopt->dqio_mutex);
ret = dqopt->ops[type]->write_file_info(sb, type);
@@ -1896,7 +1896,7 @@ EXPORT_SYMBOL(dquot_file_open);
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
int cnt, ret = 0;
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
struct inode *toputinode[MAXQUOTAS];
/* Cannot turn off usage accounting without turning off limits, or
@@ -2045,7 +2045,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
{
struct quota_format_type *fmt = find_quota_format(format_id);
struct super_block *sb = inode->i_sb;
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
int error;
int oldflags = -1;
@@ -2151,7 +2151,7 @@ out_fmt:
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
struct inode *inode;
int ret = 0, cnt;
unsigned int flags;
@@ -2211,7 +2211,7 @@ int dquot_enable(struct inode *inode, int type, int format_id,
{
int ret = 0;
struct super_block *sb = inode->i_sb;
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
/* Just unsuspend quotas? */
BUG_ON(flags & DQUOT_SUSPENDED);
@@ -2237,7 +2237,7 @@ int dquot_enable(struct inode *inode, int type, int format_id,
goto out_lock;
}
spin_lock(&dq_state_lock);
- sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
+ dqopts(sb)->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
out_lock:
mutex_unlock(&dqopt->dqonoff_mutex);
@@ -2339,7 +2339,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
- struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+ struct mem_dqinfo *dqi = &sb_dqopts(dquot)->info[dquot->dq_type];
if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
return -EINVAL;
@@ -2449,19 +2449,19 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopts(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
return -ESRCH;
}
- mi = sb_dqopt(sb)->info + type;
+ mi = dqopts(sb)->info + type;
spin_lock(&dq_data_lock);
ii->dqi_bgrace = mi->dqi_bgrace;
ii->dqi_igrace = mi->dqi_igrace;
ii->dqi_flags = mi->dqi_flags & DQF_MASK;
ii->dqi_valid = IIF_ALL;
spin_unlock(&dq_data_lock);
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
return 0;
}
EXPORT_SYMBOL(dquot_get_dqinfo);
@@ -2472,12 +2472,12 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
struct mem_dqinfo *mi;
int err = 0;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopts(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
err = -ESRCH;
goto out;
}
- mi = sb_dqopt(sb)->info + type;
+ mi = dqopts(sb)->info + type;
spin_lock(&dq_data_lock);
if (ii->dqi_valid & IIF_BGRACE)
mi->dqi_bgrace = ii->dqi_bgrace;
@@ -2491,7 +2491,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
/* Force write to disk */
sb->dq_op->write_info(sb, type);
out:
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopts(sb)->dqonoff_mutex);
return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874..c0917f4 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -596,7 +596,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
#ifdef __QUOTA_QT_PARANOIA
/* Invalidated quota? */
- if (!sb_dqopt(dquot->dq_sb)->files[type]) {
+ if (!sb_dqopts(dquot)->files[type]) {
quota_error(sb, "Quota invalidated while reading!");
return -EIO;
}
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 34b37a6..cab3ca3 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -57,7 +57,7 @@ static int v1_read_dqblk(struct dquot *dquot)
int type = dquot->dq_type;
struct v1_disk_dqblk dqblk;
- if (!sb_dqopt(dquot->dq_sb)->files[type])
+ if (!sb_dqopts(dquot)->files[type])
return -EINVAL;
/* Set structure to 0s in case read fails/is after end of file */
@@ -85,12 +85,12 @@ static int v1_commit_dqblk(struct dquot *dquot)
v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
if (dquot->dq_id == 0) {
dqblk.dqb_btime =
- sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+ sb_dqopts(dquot)->info[type].dqi_bgrace;
dqblk.dqb_itime =
- sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+ sb_dqopts(dquot)->info[type].dqi_igrace;
}
ret = 0;
- if (sb_dqopt(dquot->dq_sb)->files[type])
+ if (sb_dqopts(dquot)->files[type])
ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
(char *)&dqblk, sizeof(struct v1_disk_dqblk),
v1_dqoff(dquot->dq_id));
@@ -122,7 +122,7 @@ struct v2_disk_dqheader {
static int v1_check_quota_file(struct super_block *sb, int type)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
ulong blocks;
size_t off;
struct v2_disk_dqheader dqhead;
@@ -154,7 +154,7 @@ static int v1_check_quota_file(struct super_block *sb, int type)
static int v1_read_file_info(struct super_block *sb, int type)
{
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
struct v1_disk_dqblk dqblk;
int ret;
@@ -179,7 +179,7 @@ out:
static int v1_write_file_info(struct super_block *sb, int type)
{
- struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_info *dqopt = dqopts(sb);
struct v1_disk_dqblk dqblk;
int ret;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 6e85cfd..707f9dc 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -312,7 +312,7 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
- if (sb_dqopt(s)->files[i] && quota_enabled[i])
+ if (dqopts(s)->files[i] && quota_enabled[i])
dquot_quota_off(s, i);
}
if (ms_active_set)
@@ -2103,7 +2103,7 @@ out:
static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
unsigned long blk = off >> sb->s_blocksize_bits;
int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
size_t toread;
@@ -2148,7 +2148,7 @@ static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
- struct inode *inode = sb_dqopt(sb)->files[type];
+ struct inode *inode = dqopts(sb)->files[type];
unsigned long blk = off >> sb->s_blocksize_bits;
int err = 0, offset = off & (sb->s_blocksize - 1), tocopy;
int journal_quota = REISERFS_SB(sb)->s_qf_names[type] != NULL;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 9a85412..00e1b3d 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -182,6 +182,7 @@ enum {
#include <asm/atomic.h>
+
typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
typedef long long qsize_t; /* Type in which we store sizes */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9e09c9a..b154d52 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -13,10 +13,14 @@
#define DQUOT_SPACE_RESERVE 0x2
#define DQUOT_SPACE_NOFAIL 0x4
-static inline struct quota_info *sb_dqopt(struct super_block *sb)
+static inline struct quota_info *dqopts(struct super_block *sb)
{
return &sb->s_dquot;
}
+static inline struct quota_info* sb_dqopts(struct dquot *dq)
+{
+ return dqopts(dq->dq_sb);
+}
/* i_mutex must being held */
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
@@ -95,7 +99,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
{
- return sb_dqopt(sb)->info + type;
+ return dqopts(sb)->info + type;
}
/*
@@ -104,19 +108,19 @@ static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type)
{
- return sb_dqopt(sb)->flags &
+ return dqopts(sb)->flags &
dquot_state_flag(DQUOT_USAGE_ENABLED, type);
}
static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type)
{
- return sb_dqopt(sb)->flags &
+ return dqopts(sb)->flags &
dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
}
static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
{
- return sb_dqopt(sb)->flags &
+ return dqopts(sb)->flags &
dquot_state_flag(DQUOT_SUSPENDED, type);
}
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 03/19] quota: move quota internals from sb to quota_info
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 01/19] quota: protect getfmt call with dqonoff_mutex lock Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 02/19] quota: Wrap common expression to helper function Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 04/19] quota: Convert dq_state_lock to per-sb dq_state_lock Dmitry Monakhov
` (15 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
Obviously most super_blocks don't use quota, so it is reasonable
to hide the quota internals behind a pointer. The only fields still left on the sb are:
*flags indicate state, checked without locks
*onoff_mutex
*dq_op
*qcop
We can hide dq_op/qcop inside the pointer too, but IMHO it is not necessary.
The only superfluous field is dqptr_sem, but currently it is accessed
from all charge/claim/free code without a lock. So it is not easy to
serialize it with quotaon/quotaoff at this stage. But this lock
will be removed in later patches anyway, so let's keep it for a while.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ext2/super.c | 4 +-
fs/ext3/super.c | 4 +-
fs/ext4/super.c | 4 +-
fs/gfs2/ops_fstype.c | 4 +-
fs/jfs/super.c | 4 +-
fs/ocfs2/quota_local.c | 4 +-
fs/ocfs2/super.c | 6 +-
fs/quota/dquot.c | 243 +++++++++++++++++++++++++-----------------
fs/quota/quota.c | 62 ++++++------
fs/reiserfs/super.c | 4 +-
fs/super.c | 1 -
fs/sync.c | 4 +-
fs/xfs/linux-2.6/xfs_super.c | 2 +-
include/linux/fs.h | 4 +-
include/linux/quota.h | 17 ++-
include/linux/quotaops.h | 15 ++-
16 files changed, 218 insertions(+), 164 deletions(-)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7727491..de8d2c4 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1055,8 +1055,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = ext2_xattr_handlers;
#ifdef CONFIG_QUOTA
- sb->dq_op = &dquot_operations;
- sb->s_qcop = &dquot_quotactl_ops;
+ dqctl(sb)->dq_op = &dquot_operations;
+ dqctl(sb)->qcop = &dquot_quotactl_ops;
#endif
root = ext2_iget(sb, EXT2_ROOT_INO);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9740ca2..7a5c6e5 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1929,8 +1929,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sb->s_export_op = &ext3_export_ops;
sb->s_xattr = ext3_xattr_handlers;
#ifdef CONFIG_QUOTA
- sb->s_qcop = &ext3_qctl_operations;
- sb->dq_op = &ext3_quota_operations;
+ dqctl(sb)->qcop = &ext3_qctl_operations;
+ dqctl(sb)->dq_op = &ext3_quota_operations;
#endif
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e59eb37..e3f4d92 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2938,8 +2938,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
- sb->s_qcop = &ext4_qctl_operations;
- sb->dq_op = &ext4_quota_operations;
+ dqctl(sb)->qcop = &ext4_qctl_operations;
+ dqctl(sb)->dq_op = &ext4_quota_operations;
#endif
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1e52207..43d0a24 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1167,8 +1167,8 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
sb->s_op = &gfs2_super_ops;
sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers;
- sb->s_qcop = &gfs2_quotactl_ops;
- dqopts(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+ dqctl(sb)->qcop = &gfs2_quotactl_ops;
+ dqctl(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index b612adf..a8a94e6 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -477,8 +477,8 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &jfs_super_operations;
sb->s_export_op = &jfs_export_operations;
#ifdef CONFIG_QUOTA
- sb->dq_op = &dquot_operations;
- sb->s_qcop = &dquot_quotactl_ops;
+ dqctl(sb)->dq_op = &dquot_operations;
+ dqctl(sb)->qcop = &dquot_quotactl_ops;
#endif
/*
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 056cb24..7c30ba3 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -596,7 +596,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
unsigned int flags;
mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
- mutex_lock(&dqopts(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
@@ -672,7 +672,7 @@ out_put:
break;
}
out:
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
kfree(rec);
return status;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index dc5d1ab..3ebb43c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -907,7 +907,7 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
int status;
int type;
- dqopts(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
+ dqctl(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
for (type = 0; type < MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
@@ -2014,8 +2014,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_fs_info = osb;
sb->s_op = &ocfs2_sops;
sb->s_export_op = &ocfs2_export_ops;
- sb->s_qcop = &ocfs2_quotactl_ops;
- sb->dq_op = &ocfs2_quota_operations;
+ dqctl(sb)->qcop = &ocfs2_quotactl_ops;
+ dqctl(sb)->dq_op = &ocfs2_quota_operations;
sb->s_xattr = ocfs2_xattr_handlers;
sb->s_time_gran = 1;
sb->s_flags |= MS_NOATIME;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index fe45466..b87435d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -332,7 +332,7 @@ static inline int dquot_dirty(struct dquot *dquot)
static inline int mark_dquot_dirty(struct dquot *dquot)
{
- return dquot->dq_sb->dq_op->mark_dirty(dquot);
+ return dqctl(dquot->dq_sb)->dq_op->mark_dirty(dquot);
}
/* Mark dquot dirty in atomic manner, and return it's old dirty flag state */
@@ -406,16 +406,16 @@ int dquot_acquire(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
mutex_lock(&dqopt->dqio_mutex);
if (!test_bit(DQ_READ_B, &dquot->dq_flags))
- ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+ ret = dqopt->fmt_ops[dquot->dq_type]->read_dqblk(dquot);
if (ret < 0)
goto out_iolock;
set_bit(DQ_READ_B, &dquot->dq_flags);
/* Instantiate dquot if needed */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
- ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+ ret = dqopt->fmt_ops[dquot->dq_type]->commit_dqblk(dquot);
/* Write the info if needed */
if (info_dirty(&dqopt->info[dquot->dq_type])) {
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ ret2 = dqopt->fmt_ops[dquot->dq_type]->write_file_info(
dquot->dq_sb, dquot->dq_type);
}
if (ret < 0)
@@ -451,9 +451,9 @@ int dquot_commit(struct dquot *dquot)
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
- ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+ ret = dqopt->fmt_ops[dquot->dq_type]->commit_dqblk(dquot);
if (info_dirty(&dqopt->info[dquot->dq_type])) {
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ ret2 = dqopt->fmt_ops[dquot->dq_type]->write_file_info(
dquot->dq_sb, dquot->dq_type);
}
if (ret >= 0)
@@ -478,11 +478,11 @@ int dquot_release(struct dquot *dquot)
if (atomic_read(&dquot->dq_count) > 1)
goto out_dqlock;
mutex_lock(&dqopt->dqio_mutex);
- if (dqopt->ops[dquot->dq_type]->release_dqblk) {
- ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+ if (dqopt->fmt_ops[dquot->dq_type]->release_dqblk) {
+ ret = dqopt->fmt_ops[dquot->dq_type]->release_dqblk(dquot);
/* Write the info */
if (info_dirty(&dqopt->info[dquot->dq_type])) {
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+ ret2 = dqopt->fmt_ops[dquot->dq_type]->write_file_info(
dquot->dq_sb, dquot->dq_type);
}
if (ret >= 0)
@@ -504,7 +504,7 @@ EXPORT_SYMBOL(dquot_destroy);
static inline void do_destroy_dquot(struct dquot *dquot)
{
- dquot->dq_sb->dq_op->destroy_dquot(dquot);
+ dqctl(dquot->dq_sb)->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
@@ -568,7 +568,7 @@ int dquot_scan_active(struct super_block *sb,
struct dquot *dquot, *old_dquot = NULL;
int ret = 0;
- mutex_lock(&dqopts(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
spin_lock(&dq_list_lock);
list_for_each_entry(dquot, &inuse_list, dq_inuse) {
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
@@ -591,7 +591,7 @@ int dquot_scan_active(struct super_block *sb,
spin_unlock(&dq_list_lock);
out:
dqput(old_dquot);
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
@@ -600,10 +600,11 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
{
struct list_head *dirty;
struct dquot *dquot;
- struct quota_info *dqopt = dqopts(sb);
+ struct quota_info *dqopt;
int cnt;
- mutex_lock(&dqopt->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
+ dqopt = dqopts(sb);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
@@ -625,7 +626,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
- sb->dq_op->write_dquot(dquot);
+ dqctl(sb)->dq_op->write_dquot(dquot);
dqput(dquot);
spin_lock(&dq_list_lock);
}
@@ -635,11 +636,11 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
&& info_dirty(&dqopt->info[cnt]))
- sb->dq_op->write_info(sb, cnt);
+ dqctl(sb)->dq_op->write_info(sb, cnt);
dqstats_inc(DQST_SYNCS);
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
- if (!wait || (dqopts(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+ if (!wait || (dqctl(sb)->flags & DQUOT_QUOTA_SYS_FILE))
return 0;
/* This is not very clever (and fast) but currently I don't know about
@@ -653,18 +654,19 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
- mutex_lock(&dqopts(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
+ dqopt = dqopts(sb);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&dqopts(sb)->files[cnt]->i_mutex,
+ mutex_lock_nested(&dqopt->files[cnt]->i_mutex,
I_MUTEX_QUOTA);
- truncate_inode_pages(&dqopts(sb)->files[cnt]->i_data, 0);
- mutex_unlock(&dqopts(sb)->files[cnt]->i_mutex);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ mutex_unlock(&dqopt->files[cnt]->i_mutex);
}
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return 0;
}
@@ -743,7 +745,7 @@ we_slept:
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
spin_unlock(&dq_list_lock);
/* Commit dquot before releasing */
- ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+ ret = dqctl(dquot->dq_sb)->dq_op->write_dquot(dquot);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't write quota structure"
" (error %d). Quota may get out of sync!",
@@ -762,7 +764,7 @@ we_slept:
clear_dquot_dirty(dquot);
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
spin_unlock(&dq_list_lock);
- dquot->dq_sb->dq_op->release_dquot(dquot);
+ dqctl(dquot->dq_sb)->dq_op->release_dquot(dquot);
goto we_slept;
}
atomic_dec(&dquot->dq_count);
@@ -785,7 +787,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
struct dquot *dquot;
- dquot = sb->dq_op->alloc_dquot(sb, type);
+ dquot = dqctl(sb)->dq_op->alloc_dquot(sb, type);
if(!dquot)
return NULL;
@@ -858,7 +860,7 @@ we_slept:
wait_on_dquot(dquot);
/* Read the dquot / allocate space in quota file */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
- sb->dq_op->acquire_dquot(dquot) < 0) {
+ dqctl(sb)->dq_op->acquire_dquot(dquot) < 0) {
dqput(dquot);
dquot = NULL;
goto out;
@@ -1032,10 +1034,10 @@ static void drop_dquot_ref(struct super_block *sb, int type)
{
LIST_HEAD(tofree_head);
- if (sb->dq_op) {
- down_write(&dqopts(sb)->dqptr_sem);
+ if (dqctl(sb)->dq_op) {
+ down_write(&dqctl(sb)->dqptr_sem);
remove_dquot_ref(sb, type, &tofree_head);
- up_write(&dqopts(sb)->dqptr_sem);
+ up_write(&dqctl(sb)->dqptr_sem);
put_dquot_list(&tofree_head);
}
}
@@ -1081,7 +1083,7 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
- if (sb_dqopts(dquot)->flags & DQUOT_NEGATIVE_USAGE ||
+ if (dqctl(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
dquot->dq_dqb.dqb_curinodes -= number;
else
@@ -1093,7 +1095,7 @@ static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
- if (sb_dqopts(dquot)->flags & DQUOT_NEGATIVE_USAGE ||
+ if (dqctl(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
dquot->dq_dqb.dqb_curspace -= number;
else
@@ -1377,7 +1379,7 @@ static void __dquot_initialize(struct inode *inode, int type)
got[cnt] = dqget(sb, id, cnt);
}
- down_write(&dqopts(sb)->dqptr_sem);
+ down_write(&dqctl(sb)->dqptr_sem);
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1402,7 +1404,7 @@ static void __dquot_initialize(struct inode *inode, int type)
}
}
out_err:
- up_write(&dqopts(sb)->dqptr_sem);
+ up_write(&dqctl(sb)->dqptr_sem);
/* Drop unused references */
dqput_all(got);
}
@@ -1421,12 +1423,12 @@ static void __dquot_drop(struct inode *inode)
int cnt;
struct dquot *put[MAXQUOTAS];
- down_write(&dqopts(inode->i_sb)->dqptr_sem);
+ down_write(&dqctl(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = inode->i_dquot[cnt];
inode->i_dquot[cnt] = NULL;
}
- up_write(&dqopts(inode->i_sb)->dqptr_sem);
+ up_write(&dqctl(inode->i_sb)->dqptr_sem);
dqput_all(put);
}
@@ -1462,8 +1464,8 @@ static qsize_t *inode_reserved_space(struct inode * inode)
{
/* Filesystem must explicitly define it's own method in order to use
* quota reservation interface */
- BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
- return inode->i_sb->dq_op->get_reserved_space(inode);
+ BUG_ON(!dqctl(inode->i_sb)->dq_op->get_reserved_space);
+ return dqctl(inode->i_sb)->dq_op->get_reserved_space(inode);
}
void inode_add_rsv_space(struct inode *inode, qsize_t number)
@@ -1495,7 +1497,7 @@ static qsize_t inode_get_rsv_space(struct inode *inode)
{
qsize_t ret;
- if (!inode->i_sb->dq_op->get_reserved_space)
+ if (!dqctl(inode->i_sb)->dq_op->get_reserved_space)
return 0;
spin_lock(&inode->i_lock);
ret = *inode_reserved_space(inode);
@@ -1550,7 +1552,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
goto out;
}
- down_read(&dqopts(inode->i_sb)->dqptr_sem);
+ down_read(&dqctl(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1581,7 +1583,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
- up_read(&dqopts(inode->i_sb)->dqptr_sem);
+ up_read(&dqctl(inode->i_sb)->dqptr_sem);
out:
return ret;
}
@@ -1601,7 +1603,7 @@ int dquot_alloc_inode(const struct inode *inode)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&dqopts(inode->i_sb)->dqptr_sem);
+ down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1622,7 +1624,7 @@ warn_put_all:
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
- up_read(&dqopts(inode->i_sb)->dqptr_sem);
+ up_read(&dqctl(inode->i_sb)->dqptr_sem);
return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1639,7 +1641,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
return 0;
}
- down_read(&dqopts(inode->i_sb)->dqptr_sem);
+ down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1651,7 +1653,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
inode_claim_rsv_space(inode, number);
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
- up_read(&dqopts(inode->i_sb)->dqptr_sem);
+ up_read(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1672,7 +1674,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
return;
}
- down_read(&dqopts(inode->i_sb)->dqptr_sem);
+ down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1691,7 +1693,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
mark_all_dquot_dirty(inode->i_dquot);
out_unlock:
flush_warnings(inode->i_dquot, warntype);
- up_read(&dqopts(inode->i_sb)->dqptr_sem);
+ up_read(&dqctl(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(__dquot_free_space);
@@ -1708,7 +1710,7 @@ void dquot_free_inode(const struct inode *inode)
if (!dquot_active(inode))
return;
- down_read(&dqopts(inode->i_sb)->dqptr_sem);
+ down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1719,7 +1721,7 @@ void dquot_free_inode(const struct inode *inode)
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
- up_read(&dqopts(inode->i_sb)->dqptr_sem);
+ up_read(&dqctl(inode->i_sb)->dqptr_sem);
}
EXPORT_SYMBOL(dquot_free_inode);
@@ -1750,9 +1752,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype_to[cnt] = QUOTA_NL_NOWARN;
- down_write(&dqopts(inode->i_sb)->dqptr_sem);
+ down_write(&dqctl(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
- up_write(&dqopts(inode->i_sb)->dqptr_sem);
+ up_write(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
}
spin_lock(&dq_data_lock);
@@ -1804,7 +1806,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
inode->i_dquot[cnt] = transfer_to[cnt];
}
spin_unlock(&dq_data_lock);
- up_write(&dqopts(inode->i_sb)->dqptr_sem);
+ up_write(&dqctl(inode->i_sb)->dqptr_sem);
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
@@ -1818,7 +1820,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
return 0;
over_quota:
spin_unlock(&dq_data_lock);
- up_write(&dqopts(inode->i_sb)->dqptr_sem);
+ up_write(&dqctl(inode->i_sb)->dqptr_sem);
flush_warnings(transfer_to, warntype_to);
return ret;
}
@@ -1856,7 +1858,7 @@ int dquot_commit_info(struct super_block *sb, int type)
struct quota_info *dqopt = dqopts(sb);
mutex_lock(&dqopt->dqio_mutex);
- ret = dqopt->ops[type]->write_file_info(sb, type);
+ ret = dqopt->fmt_ops[type]->write_file_info(sb, type);
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
@@ -1890,13 +1892,37 @@ int dquot_file_open(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(dquot_file_open);
+/* Next two helpers called with dqonoff_mutex held */
+static int alloc_quota_info(struct quota_ctl_info *dqctl) {
+ int err = -ENOMEM;
+ struct quota_info *dqopt;
+ BUG_ON(dqctl->dq_opt);
+
+ dqopt = kzalloc(sizeof(*dqopt), GFP_NOFS);
+ if (!dqopt)
+ return err;
+
+ mutex_init(&dqopt->dqio_mutex);
+ dqctl->dq_opt = dqopt;
+ return 0;
+}
+
+static void free_quota_info(struct quota_ctl_info *dqctl)
+{
+ if (dqctl->dq_opt) {
+ kfree(dqctl->dq_opt);
+ dqctl->dq_opt = NULL;
+ }
+}
+
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
int cnt, ret = 0;
- struct quota_info *dqopt = dqopts(sb);
+ struct quota_ctl_info *qctl = dqctl(sb);
+ struct quota_info *dqopt;
struct inode *toputinode[MAXQUOTAS];
/* Cannot turn off usage accounting without turning off limits, or
@@ -1907,15 +1933,15 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
return -EINVAL;
/* We need to serialize quota_off() for device */
- mutex_lock(&dqopt->dqonoff_mutex);
-
+ mutex_lock(&qctl->dqonoff_mutex);
+ dqopt = dqopts(sb);
/*
* Skip everything if there's nothing to do. We have to do this because
* sometimes we are called when fill_super() failed and calling
* sync_fs() in such cases does no good.
*/
if (!sb_any_quota_loaded(sb)) {
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
return 0;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1927,16 +1953,16 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
if (flags & DQUOT_SUSPENDED) {
spin_lock(&dq_state_lock);
- dqopt->flags |=
+ qctl->flags |=
dquot_state_flag(DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
} else {
spin_lock(&dq_state_lock);
- dqopt->flags &= ~dquot_state_flag(flags, cnt);
+ qctl->flags &= ~dquot_state_flag(flags, cnt);
/* Turning off suspended quotas? */
if (!sb_has_quota_loaded(sb, cnt) &&
sb_has_quota_suspended(sb, cnt)) {
- dqopt->flags &= ~dquot_state_flag(
+ qctl->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
iput(dqopt->files[cnt]);
@@ -1958,9 +1984,9 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
* should be only users of the info. No locks needed.
*/
if (info_dirty(&dqopt->info[cnt]))
- sb->dq_op->write_info(sb, cnt);
- if (dqopt->ops[cnt]->free_file_info)
- dqopt->ops[cnt]->free_file_info(sb, cnt);
+ qctl->dq_op->write_info(sb, cnt);
+ if (dqopt->fmt_ops[cnt]->free_file_info)
+ dqopt->fmt_ops[cnt]->free_file_info(sb, cnt);
put_quota_format(dqopt->info[cnt].dqi_format);
toputinode[cnt] = dqopt->files[cnt];
@@ -1969,12 +1995,12 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
- dqopt->ops[cnt] = NULL;
+ dqopt->fmt_ops[cnt] = NULL;
}
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
/* Skip syncing and setting flags if quota files are hidden */
- if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+ if (qctl->flags & DQUOT_QUOTA_SYS_FILE)
goto put_inodes;
/* Sync the superblock so that buffers with quota data are written to
@@ -1989,7 +2015,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (toputinode[cnt]) {
- mutex_lock(&dqopt->dqonoff_mutex);
+ mutex_lock(&qctl->dqonoff_mutex);
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_loaded(sb, cnt)) {
@@ -2002,7 +2028,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty_sync(toputinode[cnt]);
}
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev);
@@ -2021,6 +2047,11 @@ put_inodes:
else if (!toputinode[cnt]->i_nlink)
ret = -EBUSY;
}
+ if (!sb_any_quota_loaded(sb)) {
+ mutex_lock(&qctl->dqonoff_mutex);
+ free_quota_info(qctl);
+ mutex_unlock(&qctl->dqonoff_mutex);
+ }
return ret;
}
EXPORT_SYMBOL(dquot_disable);
@@ -2045,7 +2076,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
{
struct quota_format_type *fmt = find_quota_format(format_id);
struct super_block *sb = inode->i_sb;
- struct quota_info *dqopt = dqopts(sb);
+ struct quota_info *dqopt;
int error;
int oldflags = -1;
@@ -2069,7 +2100,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
goto out_fmt;
}
- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ if (!(dqctl(sb)->flags & DQUOT_QUOTA_SYS_FILE)) {
/* As we bypass the pagecache we must now flush all the
* dirty data and invalidate caches so that kernel sees
* changes from userspace. It is not enough to just flush
@@ -2079,13 +2110,14 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
sync_filesystem(sb);
invalidate_bdev(sb->s_bdev);
}
- mutex_lock(&dqopt->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
+ dqopt = dqopts(sb);
if (sb_has_quota_loaded(sb, type)) {
error = -EBUSY;
goto out_lock;
}
- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ if (!(dqctl(sb)->flags & DQUOT_QUOTA_SYS_FILE)) {
/* We don't want quota and atime on quota files (deadlocks
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
@@ -2109,23 +2141,23 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
if (!fmt->qf_ops->check_quota_file(sb, type))
goto out_file_init;
- dqopt->ops[type] = fmt->qf_ops;
+ dqopt->fmt_ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
dqopt->info[type].dqi_fmt_id = format_id;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
mutex_lock(&dqopt->dqio_mutex);
- error = dqopt->ops[type]->read_file_info(sb, type);
+ error = dqopt->fmt_ops[type]->read_file_info(sb, type);
if (error < 0) {
mutex_unlock(&dqopt->dqio_mutex);
goto out_file_init;
}
mutex_unlock(&dqopt->dqio_mutex);
spin_lock(&dq_state_lock);
- dqopt->flags |= dquot_state_flag(flags, type);
+ dqctl(sb)->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
add_dquot_ref(sb, type);
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return 0;
@@ -2141,7 +2173,7 @@ out_lock:
inode->i_flags |= oldflags;
mutex_unlock(&inode->i_mutex);
}
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
out_fmt:
put_quota_format(fmt);
@@ -2151,7 +2183,7 @@ out_fmt:
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
- struct quota_info *dqopt = dqopts(sb);
+ struct quota_ctl_info *qctl = dqctl(sb);
struct inode *inode;
int ret = 0, cnt;
unsigned int flags;
@@ -2160,24 +2192,24 @@ int dquot_resume(struct super_block *sb, int type)
if (type != -1 && cnt != type)
continue;
- mutex_lock(&dqopt->dqonoff_mutex);
+ mutex_lock(&qctl->dqonoff_mutex);
if (!sb_has_quota_suspended(sb, cnt)) {
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
continue;
}
- inode = dqopt->files[cnt];
- dqopt->files[cnt] = NULL;
+ inode = qctl->dq_opt->files[cnt];
+ qctl->dq_opt->files[cnt] = NULL;
spin_lock(&dq_state_lock);
- flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
+ flags = qctl->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
cnt);
- dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
+ qctl->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
spin_unlock(&dq_state_lock);
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
flags = dquot_generic_flag(flags, cnt);
ret = vfs_load_quota_inode(inode, cnt,
- dqopt->info[cnt].dqi_fmt_id, flags);
+ dqopts(sb)->info[cnt].dqi_fmt_id, flags);
iput(inode);
}
@@ -2188,9 +2220,18 @@ EXPORT_SYMBOL(dquot_resume);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
struct path *path)
{
+ struct quota_ctl_info *qctl = dqctl(sb);
int error = security_quota_on(path->dentry);
if (error)
return error;
+
+ mutex_lock(&qctl->dqonoff_mutex);
+ if (!sb_any_quota_loaded(sb))
+ error = alloc_quota_info(qctl);
+ mutex_unlock(&qctl->dqonoff_mutex);
+ if (error)
+ goto out;
+
/* Quota file not on the same filesystem? */
if (path->mnt->mnt_sb != sb)
error = -EXDEV;
@@ -2198,6 +2239,12 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
error = vfs_load_quota_inode(path->dentry->d_inode, type,
format_id, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
+out:
+ if (!sb_any_quota_loaded(sb)) {
+ mutex_lock(&qctl->dqonoff_mutex);
+ free_quota_info(qctl);
+ mutex_unlock(&qctl->dqonoff_mutex);
+ }
return error;
}
EXPORT_SYMBOL(dquot_quota_on);
@@ -2211,7 +2258,7 @@ int dquot_enable(struct inode *inode, int type, int format_id,
{
int ret = 0;
struct super_block *sb = inode->i_sb;
- struct quota_info *dqopt = dqopts(sb);
+ struct quota_ctl_info *qctl = dqctl(sb);
/* Just unsuspend quotas? */
BUG_ON(flags & DQUOT_SUSPENDED);
@@ -2220,10 +2267,10 @@ int dquot_enable(struct inode *inode, int type, int format_id,
return 0;
/* Just updating flags needed? */
if (sb_has_quota_loaded(sb, type)) {
- mutex_lock(&dqopt->dqonoff_mutex);
+ mutex_lock(&qctl->dqonoff_mutex);
/* Now do a reliable test... */
if (!sb_has_quota_loaded(sb, type)) {
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
goto load_quota;
}
if (flags & DQUOT_USAGE_ENABLED &&
@@ -2237,10 +2284,10 @@ int dquot_enable(struct inode *inode, int type, int format_id,
goto out_lock;
}
spin_lock(&dq_state_lock);
- dqopts(sb)->flags |= dquot_state_flag(flags, type);
+ qctl->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
out_lock:
- mutex_unlock(&dqopt->dqonoff_mutex);
+ mutex_unlock(&qctl->dqonoff_mutex);
return ret;
}
@@ -2449,9 +2496,9 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
struct mem_dqinfo *mi;
- mutex_lock(&dqopts(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return -ESRCH;
}
mi = dqopts(sb)->info + type;
@@ -2461,7 +2508,7 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
ii->dqi_flags = mi->dqi_flags & DQF_MASK;
ii->dqi_valid = IIF_ALL;
spin_unlock(&dq_data_lock);
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return 0;
}
EXPORT_SYMBOL(dquot_get_dqinfo);
@@ -2472,7 +2519,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
struct mem_dqinfo *mi;
int err = 0;
- mutex_lock(&dqopts(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
err = -ESRCH;
goto out;
@@ -2489,9 +2536,9 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
spin_unlock(&dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
- sb->dq_op->write_info(sb, type);
+ dqctl(sb)->dq_op->write_info(sb, type);
out:
- mutex_unlock(&dqopts(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index ce8db30..5a79e09 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -47,8 +47,8 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
static void quota_sync_one(struct super_block *sb, void *arg)
{
- if (sb->s_qcop && sb->s_qcop->quota_sync)
- sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
+ if (dqctl(sb)->qcop && dqctl(sb)->qcop->quota_sync)
+ dqctl(sb)->qcop->quota_sync(sb, *(int *)arg, 1);
}
static int quota_sync_all(int type)
@@ -66,26 +66,26 @@ static int quota_sync_all(int type)
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
struct path *path)
{
- if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+ if (!dqctl(sb)->qcop->quota_on && !dqctl(sb)->qcop->quota_on_meta)
return -ENOSYS;
- if (sb->s_qcop->quota_on_meta)
- return sb->s_qcop->quota_on_meta(sb, type, id);
+ if (dqctl(sb)->qcop->quota_on_meta)
+ return dqctl(sb)->qcop->quota_on_meta(sb, type, id);
if (IS_ERR(path))
return PTR_ERR(path);
- return sb->s_qcop->quota_on(sb, type, id, path);
+ return dqctl(sb)->qcop->quota_on(sb, type, id, path);
}
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
if (!sb_has_quota_active(sb, type)) {
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return -ESRCH;
}
- fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ fmt = dqopts(sb)->info[type].dqi_format->qf_fmt_id;
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
if (copy_to_user(addr, &fmt, sizeof(fmt)))
return -EFAULT;
return 0;
@@ -96,9 +96,9 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo info;
int ret;
- if (!sb->s_qcop->get_info)
+ if (!dqctl(sb)->qcop->get_info)
return -ENOSYS;
- ret = sb->s_qcop->get_info(sb, type, &info);
+ ret = dqctl(sb)->qcop->get_info(sb, type, &info);
if (!ret && copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
return ret;
@@ -110,9 +110,9 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
- if (!sb->s_qcop->set_info)
+ if (!dqctl(sb)->qcop->set_info)
return -ENOSYS;
- return sb->s_qcop->set_info(sb, type, &info);
+ return dqctl(sb)->qcop->set_info(sb, type, &info);
}
static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
@@ -135,9 +135,9 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
struct if_dqblk idq;
int ret;
- if (!sb->s_qcop->get_dqblk)
+ if (!dqctl(sb)->qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
+ ret = dqctl(sb)->qcop->get_dqblk(sb, type, id, &fdq);
if (ret)
return ret;
copy_to_if_dqblk(&idq, &fdq);
@@ -180,10 +180,10 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
- if (!sb->s_qcop->set_dqblk)
+ if (!dqctl(sb)->qcop->set_dqblk)
return -ENOSYS;
copy_from_if_dqblk(&fdq, &idq);
- return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
+ return dqctl(sb)->qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -192,9 +192,9 @@ static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
- if (!sb->s_qcop->set_xstate)
+ if (!dqctl(sb)->qcop->set_xstate)
return -ENOSYS;
- return sb->s_qcop->set_xstate(sb, flags, cmd);
+ return dqctl(sb)->qcop->set_xstate(sb, flags, cmd);
}
static int quota_getxstate(struct super_block *sb, void __user *addr)
@@ -202,9 +202,9 @@ static int quota_getxstate(struct super_block *sb, void __user *addr)
struct fs_quota_stat fqs;
int ret;
- if (!sb->s_qcop->get_xstate)
+ if (!dqctl(sb)->qcop->get_xstate)
return -ENOSYS;
- ret = sb->s_qcop->get_xstate(sb, &fqs);
+ ret = dqctl(sb)->qcop->get_xstate(sb, &fqs);
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return ret;
@@ -217,9 +217,9 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
- if (!sb->s_qcop->set_dqblk)
+ if (!dqctl(sb)->qcop->set_dqblk)
return -ENOSYS;
- return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
+ return dqctl(sb)->qcop->set_dqblk(sb, type, id, &fdq);
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
@@ -228,9 +228,9 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
struct fs_disk_quota fdq;
int ret;
- if (!sb->s_qcop->get_dqblk)
+ if (!dqctl(sb)->qcop->get_dqblk)
return -ENOSYS;
- ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
+ ret = dqctl(sb)->qcop->get_dqblk(sb, type, id, &fdq);
if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
@@ -244,7 +244,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
return -EINVAL;
- if (!sb->s_qcop)
+ if (!dqctl(sb)->qcop)
return -ENOSYS;
ret = check_quotactl_permission(sb, type, cmd, id);
@@ -255,9 +255,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
case Q_QUOTAON:
return quota_quotaon(sb, type, cmd, id, path);
case Q_QUOTAOFF:
- if (!sb->s_qcop->quota_off)
+ if (!dqctl(sb)->qcop->quota_off)
return -ENOSYS;
- return sb->s_qcop->quota_off(sb, type);
+ return dqctl(sb)->qcop->quota_off(sb, type);
case Q_GETFMT:
return quota_getfmt(sb, type, addr);
case Q_GETINFO:
@@ -269,9 +269,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
case Q_SETQUOTA:
return quota_setquota(sb, type, id, addr);
case Q_SYNC:
- if (!sb->s_qcop->quota_sync)
+ if (!dqctl(sb)->qcop->quota_sync)
return -ENOSYS;
- return sb->s_qcop->quota_sync(sb, type, 1);
+ return dqctl(sb)->qcop->quota_sync(sb, type, 1);
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM:
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 707f9dc..4a51677 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1407,8 +1407,8 @@ static int read_super_block(struct super_block *s, int offset)
s->s_op = &reiserfs_sops;
s->s_export_op = &reiserfs_export_ops;
#ifdef CONFIG_QUOTA
- s->s_qcop = &reiserfs_qctl_operations;
- s->dq_op = &reiserfs_quota_operations;
+ dqctl(s)->qcop = &reiserfs_qctl_operations;
+ dqctl(s)->dq_op = &reiserfs_quota_operations;
#endif
/* new format is limited by the 32 bit wide i_blocks field, want to
diff --git a/fs/super.c b/fs/super.c
index 8819e3a..9eea8e9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -103,7 +103,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
- mutex_init(&s->s_dquot.dqio_mutex);
mutex_init(&s->s_dquot.dqonoff_mutex);
init_rwsem(&s->s_dquot.dqptr_sem);
init_waitqueue_head(&s->s_wait_unfrozen);
diff --git a/fs/sync.c b/fs/sync.c
index ba76b96..891e8ef 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -36,8 +36,8 @@ static int __sync_filesystem(struct super_block *sb, int wait)
if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
return 0;
- if (sb->s_qcop && sb->s_qcop->quota_sync)
- sb->s_qcop->quota_sync(sb, -1, wait);
+ if (sb->s_dquot.qcop && sb->s_dquot.qcop->quota_sync)
+ sb->s_dquot.qcop->quota_sync(sb, -1, wait);
if (wait)
sync_inodes_sb(sb);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index a4e0797..4e59a08 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1510,7 +1510,7 @@ xfs_fs_fill_super(
sb->s_xattr = xfs_xattr_handlers;
sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
- sb->s_qcop = &xfs_quotactl_operations;
+ sb->s_dquot.qcop = &xfs_quotactl_operations;
#endif
sb->s_op = &xfs_super_operations;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 63d069b..e87694a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1324,8 +1324,6 @@ struct super_block {
loff_t s_maxbytes; /* Max file size */
struct file_system_type *s_type;
const struct super_operations *s_op;
- const struct dquot_operations *dq_op;
- const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
unsigned long s_magic;
@@ -1354,7 +1352,7 @@ struct super_block {
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct list_head s_instances;
- struct quota_info s_dquot; /* Diskquota specific options */
+ struct quota_ctl_info s_dquot; /* Diskquota specific options */
int s_frozen;
wait_queue_head_t s_wait_unfrozen;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 00e1b3d..6803834 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -324,7 +324,7 @@ struct dquot_operations {
};
struct path;
-
+struct quota_info;
/* Operations handling requests from userspace */
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -392,15 +392,20 @@ static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
return;
}
#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
+struct quota_ctl_info {
+ unsigned int flags; /* Flags for diskquotas on this device */
+ struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
+ struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
+ const struct quotactl_ops *qcop;
+ const struct dquot_operations *dq_op;
+ struct quota_info *dq_opt;
+};
struct quota_info {
- unsigned int flags; /* Flags for diskquotas on this device */
struct mutex dqio_mutex; /* lock device while I/O in progress */
- struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
- struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
- struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
- const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
+ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
+ const struct quota_format_ops *fmt_ops[MAXQUOTAS]; /* Operations for each type */
};
int register_quota_format(struct quota_format_type *fmt);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index b154d52..6f4cc74 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -13,15 +13,20 @@
#define DQUOT_SPACE_RESERVE 0x2
#define DQUOT_SPACE_NOFAIL 0x4
-static inline struct quota_info *dqopts(struct super_block *sb)
+static inline struct quota_ctl_info* dqctl( struct super_block *sb)
{
return &sb->s_dquot;
}
-static inline struct quota_info* sb_dqopts(struct dquot *dq)
+static inline struct quota_info *dqopts(const struct super_block *sb)
+{
+ return sb->s_dquot.dq_opt;
+}
+static inline struct quota_info* sb_dqopts(const struct dquot *dq)
{
return dqopts(dq->dq_sb);
}
+
/* i_mutex must being held */
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
{
@@ -108,19 +113,19 @@ static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type)
{
- return dqopts(sb)->flags &
+ return dqctl(sb)->flags &
dquot_state_flag(DQUOT_USAGE_ENABLED, type);
}
static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type)
{
- return dqopts(sb)->flags &
+ return dqctl(sb)->flags &
dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
}
static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
{
- return dqopts(sb)->flags &
+ return dqctl(sb)->flags &
dquot_state_flag(DQUOT_SUSPENDED, type);
}
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 04/19] quota: Convert dq_state_lock to per-sb dq_state_lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (2 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 03/19] quota: move quota internals from sb to quota_info Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 05/19] quota: add quota format lock Dmitry Monakhov
` (14 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Currently dq_state_lock is global, which is bad for scalability.
In fact different super_blocks have no shared quota data.
So we may simply convert the global lock to per-sb locks.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 30 +++++++++++++++---------------
include/linux/quota.h | 1 +
2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index b87435d..5dfbf9c 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -128,7 +128,6 @@
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
@@ -821,13 +820,13 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
return NULL;
we_slept:
spin_lock(&dq_list_lock);
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopts(sb)->dq_state_lock);
if (!sb_has_quota_active(sb, type)) {
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopts(sb)->dq_state_lock);
spin_unlock(&dq_list_lock);
goto out;
}
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopts(sb)->dq_state_lock);
dquot = find_dquot(hashent, sb, id, type);
if (!dquot) {
@@ -1903,6 +1902,7 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
return err;
mutex_init(&dqopt->dqio_mutex);
+ spin_lock_init(&dqopt->dq_state_lock);
dqctl->dq_opt = dqopt;
return 0;
}
@@ -1952,24 +1952,24 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
continue;
if (flags & DQUOT_SUSPENDED) {
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopt->dq_state_lock);
qctl->flags |=
dquot_state_flag(DQUOT_SUSPENDED, cnt);
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
} else {
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopt->dq_state_lock);
qctl->flags &= ~dquot_state_flag(flags, cnt);
/* Turning off suspended quotas? */
if (!sb_has_quota_loaded(sb, cnt) &&
sb_has_quota_suspended(sb, cnt)) {
qctl->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
iput(dqopt->files[cnt]);
dqopt->files[cnt] = NULL;
continue;
}
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
}
/* We still have to keep quota loaded? */
@@ -2152,9 +2152,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
goto out_file_init;
}
mutex_unlock(&dqopt->dqio_mutex);
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopt->dq_state_lock);
dqctl(sb)->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
add_dquot_ref(sb, type);
mutex_unlock(&dqctl(sb)->dqonoff_mutex);
@@ -2199,12 +2199,12 @@ int dquot_resume(struct super_block *sb, int type)
}
inode = qctl->dq_opt->files[cnt];
qctl->dq_opt->files[cnt] = NULL;
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopts(sb)->dq_state_lock);
flags = qctl->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
cnt);
qctl->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopts(sb)->dq_state_lock);
mutex_unlock(&qctl->dqonoff_mutex);
flags = dquot_generic_flag(flags, cnt);
@@ -2283,9 +2283,9 @@ int dquot_enable(struct inode *inode, int type, int format_id,
ret = -EBUSY;
goto out_lock;
}
- spin_lock(&dq_state_lock);
+ spin_lock(&dqopts(sb)->dq_state_lock);
qctl->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dq_state_lock);
+ spin_unlock(&dqopts(sb)->dq_state_lock);
out_lock:
mutex_unlock(&qctl->dqonoff_mutex);
return ret;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 6803834..3fca71f 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -404,6 +404,7 @@ struct quota_ctl_info {
struct quota_info {
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
+ spinlock_t dq_state_lock; /* serialize quota state changes*/
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
const struct quota_format_ops *fmt_ops[MAXQUOTAS]; /* Operations for each type */
};
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 05/19] quota: add quota format lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (3 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 04/19] quota: Convert dq_state_lock to per-sb dq_state_lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 06/19] quota: make dquot lists per-sb Dmitry Monakhov
` (13 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Currently dq_list_lock is responsible for quota format protection,
which is counterproductive. Introduce a dedicated lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5dfbf9c..31d6b44 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,7 +82,6 @@
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats.
* dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
* also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
@@ -128,6 +127,7 @@
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_fmt_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
@@ -158,10 +158,10 @@ static struct kmem_cache *dquot_cachep;
int register_quota_format(struct quota_format_type *fmt)
{
- spin_lock(&dq_list_lock);
+ spin_lock(&dq_fmt_lock);
fmt->qf_next = quota_formats;
quota_formats = fmt;
- spin_unlock(&dq_list_lock);
+ spin_unlock(&dq_fmt_lock);
return 0;
}
EXPORT_SYMBOL(register_quota_format);
@@ -170,13 +170,13 @@ void unregister_quota_format(struct quota_format_type *fmt)
{
struct quota_format_type **actqf;
- spin_lock(&dq_list_lock);
+ spin_lock(&dq_fmt_lock);
for (actqf = "a_formats; *actqf && *actqf != fmt;
actqf = &(*actqf)->qf_next)
;
if (*actqf)
*actqf = (*actqf)->qf_next;
- spin_unlock(&dq_list_lock);
+ spin_unlock(&dq_fmt_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
@@ -184,14 +184,14 @@ static struct quota_format_type *find_quota_format(int id)
{
struct quota_format_type *actqf;
- spin_lock(&dq_list_lock);
+ spin_lock(&dq_fmt_lock);
for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
actqf = actqf->qf_next)
;
if (!actqf || !try_module_get(actqf->qf_owner)) {
int qm;
- spin_unlock(&dq_list_lock);
+ spin_unlock(&dq_fmt_lock);
for (qm = 0; module_names[qm].qm_fmt_id &&
module_names[qm].qm_fmt_id != id; qm++)
@@ -200,14 +200,14 @@ static struct quota_format_type *find_quota_format(int id)
request_module(module_names[qm].qm_mod_name))
return NULL;
- spin_lock(&dq_list_lock);
+ spin_lock(&dq_fmt_lock);
for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
actqf = actqf->qf_next)
;
if (actqf && !try_module_get(actqf->qf_owner))
actqf = NULL;
}
- spin_unlock(&dq_list_lock);
+ spin_unlock(&dq_fmt_lock);
return actqf;
}
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 06/19] quota: make dquot lists per-sb
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (4 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 05/19] quota: add quota format lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 07/19] quota: make per-sb hash array Dmitry Monakhov
` (12 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Currently quota lists are global, which is very bad for scalability.
* inuse_list -> sb->s_dquot->dq_inuse_list
* free_dquots -> sb->s_dquot->dq_free_list
* Add a per-sb lock to protect the quota lists
dq_list_lock is not removed; it is now used only for protecting quota_hash
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 88 +++++++++++++++++++++++++++++++++++++++---------
include/linux/quota.h | 4 ++
2 files changed, 75 insertions(+), 17 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 31d6b44..324f124 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -90,7 +90,8 @@
* about latest values take it as well.
*
* The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
- * dq_list_lock > dq_state_lock
+ * dq_list_lock > sb->s_dquot->dq_state_lock
+ * dq_list_lock > sb->s_dquot->dq_list_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
@@ -236,8 +237,6 @@ static void put_quota_format(struct quota_format_type *fmt)
* mechanism to locate a specific dquot.
*/
-static LIST_HEAD(inuse_list);
-static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
@@ -289,7 +288,7 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
- list_add_tail(&dquot->dq_free, &free_dquots);
+ list_add_tail(&dquot->dq_free, &sb_dqopts(dquot)->dq_free_list);
dqstats_inc(DQST_FREE_DQUOTS);
}
@@ -305,7 +304,7 @@ static inline void put_inuse(struct dquot *dquot)
{
/* We add to the back of inuse list so we don't have to restart
* when traversing this list and we block */
- list_add_tail(&dquot->dq_inuse, &inuse_list);
+ list_add_tail(&dquot->dq_inuse, &sb_dqopts(dquot)->dq_inuse_list);
dqstats_inc(DQST_ALLOC_DQUOTS);
}
@@ -338,17 +337,20 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
int ret = 1;
+ struct quota_info *dqopt = sb_dqopts(dquot);
/* If quota is dirty already, we don't have to acquire dq_list_lock */
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return 1;
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
- list_add(&dquot->dq_dirty, &sb_dqopts(dquot)->
- info[dquot->dq_type].dqi_dirty_list);
+ list_add(&dquot->dq_dirty,
+ &dqopt->info[dquot->dq_type].dqi_dirty_list);
ret = 0;
}
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
return ret;
}
@@ -442,10 +444,13 @@ int dquot_commit(struct dquot *dquot)
mutex_lock(&dqopt->dqio_mutex);
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
if (!clear_dquot_dirty(dquot)) {
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
goto out_sem;
}
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
@@ -515,10 +520,12 @@ static inline void do_destroy_dquot(struct dquot *dquot)
static void invalidate_dquots(struct super_block *sb, int type)
{
struct dquot *dquot, *tmp;
+ struct quota_info *dqopt = dqopts(sb);
restart:
spin_lock(&dq_list_lock);
- list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+ spin_lock(&dqopt->dq_list_lock);
+ list_for_each_entry_safe(dquot, tmp, &dqopt->dq_inuse_list, dq_inuse) {
if (dquot->dq_sb != sb)
continue;
if (dquot->dq_type != type)
@@ -530,6 +537,7 @@ restart:
atomic_inc(&dquot->dq_count);
prepare_to_wait(&dquot->dq_wait_unused, &wait,
TASK_UNINTERRUPTIBLE);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
/* Once dqput() wakes us up, we know it's time to free
* the dquot.
@@ -556,6 +564,7 @@ restart:
remove_inuse(dquot);
do_destroy_dquot(dquot);
}
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
}
@@ -565,17 +574,21 @@ int dquot_scan_active(struct super_block *sb,
unsigned long priv)
{
struct dquot *dquot, *old_dquot = NULL;
+ struct quota_info *dqopt;
int ret = 0;
mutex_lock(&dqctl(sb)->dqonoff_mutex);
+ dqopt = dqopts(sb);
spin_lock(&dq_list_lock);
- list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+ spin_lock(&dqopt->dq_list_lock);
+ list_for_each_entry(dquot, &dqopt->dq_inuse_list, dq_inuse) {
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
continue;
if (dquot->dq_sb != sb)
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
@@ -584,9 +597,11 @@ int dquot_scan_active(struct super_block *sb,
if (ret < 0)
goto out;
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
/* We are safe to continue now because our dquot could not
* be moved out of the inuse list while we hold the reference */
}
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
out:
dqput(old_dquot);
@@ -610,6 +625,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
if (!sb_has_quota_active(sb, cnt))
continue;
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
dirty = &dqopt->info[cnt].dqi_dirty_list;
while (!list_empty(dirty)) {
dquot = list_first_entry(dirty, struct dquot,
@@ -623,12 +639,15 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* holding reference so we can safely just increase
* use count */
atomic_inc(&dquot->dq_count);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqctl(sb)->dq_op->write_dquot(dquot);
dqput(dquot);
+ spin_lock(&dqopt->dq_list_lock);
spin_lock(&dq_list_lock);
}
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
}
@@ -672,23 +691,36 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
EXPORT_SYMBOL(dquot_quota_sync);
/* Free unused dquots from cache */
-static void prune_dqcache(int count)
+static void prune_one_sb_dqcache(struct super_block *sb, void *arg)
{
struct list_head *head;
struct dquot *dquot;
+ struct quota_info *dqopt = dqopts(sb);
+ int count = *(int*) arg;
- head = free_dquots.prev;
- while (head != &free_dquots && count) {
+ mutex_lock(&dqctl(sb)->dqonoff_mutex);
+ if (!sb_any_quota_loaded(sb)) {
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
+ return;
+ }
+ spin_lock(&dqopt->dq_list_lock);
+ head = dqopt->dq_free_list.prev;
+ while (head != &dqopt->dq_free_list && count) {
dquot = list_entry(head, struct dquot, dq_free);
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
count--;
- head = free_dquots.prev;
+ head = dqopt->dq_free_list.prev;
}
+ spin_unlock(&dqopt->dq_list_lock);
+ mutex_unlock(&dqctl(sb)->dqonoff_mutex);
+}
+static void prune_dqcache(int count)
+{
+ iterate_supers(prune_one_sb_dqcache, &count);
}
-
/*
* This is called from kswapd when we think we need some
* more memory
@@ -717,6 +749,7 @@ static struct shrinker dqcache_shrinker = {
void dqput(struct dquot *dquot)
{
int ret;
+ struct quota_info *dqopt;
if (!dquot)
return;
@@ -727,9 +760,11 @@ void dqput(struct dquot *dquot)
BUG();
}
#endif
+ dqopt = sb_dqopts(dquot);
dqstats_inc(DQST_DROPS);
we_slept:
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
/* We have more than one user... nothing to do */
atomic_dec(&dquot->dq_count);
@@ -737,11 +772,13 @@ we_slept:
if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
atomic_read(&dquot->dq_count) == 1)
wake_up(&dquot->dq_wait_unused);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
return;
}
/* Need to release dquot? */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
/* Commit dquot before releasing */
ret = dqctl(dquot->dq_sb)->dq_op->write_dquot(dquot);
@@ -754,7 +791,9 @@ we_slept:
* infinite loop here
*/
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
clear_dquot_dirty(dquot);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
}
goto we_slept;
@@ -762,6 +801,7 @@ we_slept:
/* Clear flag in case dquot was inactive (something bad happened) */
clear_dquot_dirty(dquot);
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
dqctl(dquot->dq_sb)->dq_op->release_dquot(dquot);
goto we_slept;
@@ -772,6 +812,7 @@ we_slept:
BUG_ON(!list_empty(&dquot->dq_free));
#endif
put_dquot_last(dquot);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
@@ -815,22 +856,26 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
unsigned int hashent = hashfn(sb, id, type);
struct dquot *dquot = NULL, *empty = NULL;
+ struct quota_info *dqopt = dqopts(sb);
if (!sb_has_quota_active(sb, type))
return NULL;
we_slept:
spin_lock(&dq_list_lock);
- spin_lock(&dqopts(sb)->dq_state_lock);
+ spin_lock(&dqopt->dq_list_lock);
+ spin_lock(&dqopt->dq_state_lock);
if (!sb_has_quota_active(sb, type)) {
- spin_unlock(&dqopts(sb)->dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
goto out;
}
- spin_unlock(&dqopts(sb)->dq_state_lock);
+ spin_unlock(&dqopt->dq_state_lock);
dquot = find_dquot(hashent, sb, id, type);
if (!dquot) {
if (!empty) {
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
empty = get_empty_dquot(sb, type);
if (!empty)
@@ -844,12 +889,14 @@ we_slept:
put_inuse(dquot);
/* hash it first so it can be found */
insert_dquot_hash(dquot);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);
@@ -955,6 +1002,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
struct list_head *tofree_head)
{
struct dquot *dquot = inode->i_dquot[type];
+ struct quota_info *dqopt = dqopts(inode->i_sb);
inode->i_dquot[type] = NULL;
if (dquot) {
@@ -966,9 +1014,11 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
atomic_read(&dquot->dq_count));
#endif
spin_lock(&dq_list_lock);
+ spin_lock(&dqopt->dq_list_lock);
/* As dquot must have currently users it can't be on
* the free list... */
list_add(&dquot->dq_free, tofree_head);
+ spin_unlock(&dqopt->dq_list_lock);
spin_unlock(&dq_list_lock);
return 1;
}
@@ -1903,6 +1953,10 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
mutex_init(&dqopt->dqio_mutex);
spin_lock_init(&dqopt->dq_state_lock);
+ spin_lock_init(&dqopt->dq_list_lock);
+ INIT_LIST_HEAD(&dqopt->dq_inuse_list);
+ INIT_LIST_HEAD(&dqopt->dq_free_list);
+
dqctl->dq_opt = dqopt;
return 0;
}
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 3fca71f..bb63abf 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -405,6 +405,10 @@ struct quota_info {
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
spinlock_t dq_state_lock; /* serialize quota state changes*/
+ spinlock_t dq_list_lock; /* protect lists */
+ struct list_head dq_inuse_list; /* list of inused dquotas */
+ struct list_head dq_free_list; /* list of free dquotas */
+
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
const struct quota_format_ops *fmt_ops[MAXQUOTAS]; /* Operations for each type */
};
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 07/19] quota: make per-sb hash array
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (5 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 06/19] quota: make dquot lists per-sb Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-27 19:31 ` Al Viro
2010-10-22 17:34 ` [PATCH 08/19] quota: remove global dq_list_lock Dmitry Monakhov
` (11 subsequent siblings)
18 siblings, 1 reply; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Currently quota_hash[] is global, which is bad for scalability.
Also it is the last user of the global dq_list_lock.
It is reasonable to introduce a dedicated hash for each super_block
which uses quota.
per-sb hash will be allocated only when necessary (on first quota_on())
Protected by per-sb dq_list_lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 97 ++++++++++++++++++++++++++++++-------------------
include/linux/quota.h | 11 +++++-
2 files changed, 69 insertions(+), 39 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 324f124..822d7ad 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -220,7 +220,7 @@ static void put_quota_format(struct quota_format_type *fmt)
/*
* Dquot List Management:
* The quota code uses three lists for dquot management: the inuse_list,
- * free_dquots, and dquot_hash[] array. A single dquot structure may be
+ * free_dquots, and dq_hash[] array. A single dquot structure may be
* on all three lists, depending on its current state.
*
* All dquots are placed to the end of inuse_list when first created, and this
@@ -233,13 +233,10 @@ static void put_quota_format(struct quota_format_type *fmt)
* dquot is invalidated it's completely released from memory.
*
* Dquots with a specific identity (device, type and id) are placed on
- * one of the dquot_hash[] hash chains. The provides an efficient search
+ * one of the dq_hash[] hash chains. This provides an efficient search
* mechanism to locate a specific dquot.
*/
-static unsigned int dq_hash_bits, dq_hash_mask;
-static struct hlist_head *dquot_hash;
-
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
@@ -251,8 +248,9 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
{
unsigned long tmp;
- tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
- return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
+ tmp = id * (MAXQUOTAS - type);
+ return (tmp + (tmp >> dqopts(sb)->dq_hash.bits)) &
+ dqopts(sb)->dq_hash.mask;
}
/*
@@ -261,7 +259,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
static inline void insert_dquot_hash(struct dquot *dquot)
{
struct hlist_head *head;
- head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+ head = sb_dqopts(dquot)->dq_hash.head +
+ hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
hlist_add_head(&dquot->dq_hash, head);
}
@@ -270,13 +269,17 @@ static inline void remove_dquot_hash(struct dquot *dquot)
hlist_del_init(&dquot->dq_hash);
}
-static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+static struct dquot *find_dquot(struct super_block *sb,
unsigned int id, int type)
{
struct hlist_node *node;
struct dquot *dquot;
+ unsigned int hashent = hashfn(sb, id, type);
- hlist_for_each (node, dquot_hash+hashent) {
+ if (!dqopts(sb)->dq_hash.head)
+ return NULL;
+
+ hlist_for_each(node, dqopts(sb)->dq_hash.head + hashent) {
dquot = hlist_entry(node, struct dquot, dq_hash);
if (dquot->dq_sb == sb && dquot->dq_id == id &&
dquot->dq_type == type)
@@ -854,7 +857,6 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
*/
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
- unsigned int hashent = hashfn(sb, id, type);
struct dquot *dquot = NULL, *empty = NULL;
struct quota_info *dqopt = dqopts(sb);
@@ -872,7 +874,7 @@ we_slept:
}
spin_unlock(&dqopt->dq_state_lock);
- dquot = find_dquot(hashent, sb, id, type);
+ dquot = find_dquot(sb, id, type);
if (!dquot) {
if (!empty) {
spin_unlock(&dqopt->dq_list_lock);
@@ -1927,6 +1929,42 @@ const struct dquot_operations dquot_operations = {
};
EXPORT_SYMBOL(dquot_operations);
+int alloc_quota_hash(struct quota_info *dqopt, int order)
+{
+ unsigned long nr_hash, i;
+ struct hlist_head *hash_array;
+ struct dquot_hash *dq_hash = &dqopt->dq_hash;
+
+ hash_array = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
+ if (!hash_array)
+ return -ENOMEM;
+
+ /* Find power-of-two hlist_heads which can fit into allocation */
+ nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
+ dq_hash->bits = ilog2(nr_hash);
+ nr_hash = 1UL << dq_hash->bits;
+ dq_hash->mask = nr_hash - 1;
+ for (i = 0; i < nr_hash; i++)
+ INIT_HLIST_HEAD(hash_array + i);
+ dq_hash->order = order;
+ dq_hash->head = hash_array;
+ return 0;
+}
+
+void free_quota_hash(struct quota_info *dqopt)
+{
+
+ struct dquot_hash *dq_hash = &dqopt->dq_hash;
+ unsigned long i, nr_hash = 1UL << dq_hash->bits;
+ unsigned long addr = (unsigned long )dq_hash->head;
+
+ for (i = 0; i < nr_hash; i++)
+ WARN_ON(!hlist_empty(dq_hash->head + i));
+
+ dq_hash->head = NULL;
+ free_pages(addr, dq_hash->order);
+}
+
/*
* Generic helper for ->open on filesystems supporting disk quotas.
*/
@@ -1956,14 +1994,21 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
spin_lock_init(&dqopt->dq_list_lock);
INIT_LIST_HEAD(&dqopt->dq_inuse_list);
INIT_LIST_HEAD(&dqopt->dq_free_list);
-
dqctl->dq_opt = dqopt;
- return 0;
+ err = alloc_quota_hash(dqopt, 0);
+
+ if (err && dqopt) {
+ free_quota_hash(dqopt);
+ kfree(dqopt);
+ dqctl->dq_opt = NULL;
+ }
+ return err;
}
static void free_quota_info(struct quota_ctl_info *dqctl)
{
if (dqctl->dq_opt) {
+ free_quota_hash(dqctl->dq_opt);
kfree(dqctl->dq_opt);
dqctl->dq_opt = NULL;
}
@@ -2709,8 +2754,6 @@ static ctl_table sys_table[] = {
static int __init dquot_init(void)
{
int i, ret;
- unsigned long nr_hash, order;
-
printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
register_sysctl_table(sys_table);
@@ -2721,33 +2764,11 @@ static int __init dquot_init(void)
SLAB_MEM_SPREAD|SLAB_PANIC),
NULL);
- order = 0;
- dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
- if (!dquot_hash)
- panic("Cannot create dquot hash table");
-
for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
ret = percpu_counter_init(&dqstats.counter[i], 0);
if (ret)
panic("Cannot create dquot stat counters");
}
-
- /* Find power-of-two hlist_heads which can fit into allocation */
- nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
- dq_hash_bits = 0;
- do {
- dq_hash_bits++;
- } while (nr_hash >> dq_hash_bits);
- dq_hash_bits--;
-
- nr_hash = 1UL << dq_hash_bits;
- dq_hash_mask = nr_hash - 1;
- for (i = 0; i < nr_hash; i++)
- INIT_HLIST_HEAD(dquot_hash + i);
-
- printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
- nr_hash, order, (PAGE_SIZE << order));
-
register_shrinker(&dqcache_shrinker);
return 0;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index bb63abf..eaa9f91 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -401,13 +401,22 @@ struct quota_ctl_info {
const struct dquot_operations *dq_op;
struct quota_info *dq_opt;
};
+
+struct dquot_hash {
+ struct hlist_head *head;
+ unsigned int order;
+ unsigned int bits;
+ unsigned int mask;
+};
+
struct quota_info {
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
spinlock_t dq_state_lock; /* serialize quota state changes*/
- spinlock_t dq_list_lock; /* protect lists */
+ spinlock_t dq_list_lock; /* protect lists and hash*/
struct list_head dq_inuse_list; /* list of inused dquotas */
struct list_head dq_free_list; /* list of free dquotas */
+ struct dquot_hash dq_hash; /* dquot lookup hash */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
const struct quota_format_ops *fmt_ops[MAXQUOTAS]; /* Operations for each type */
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH 07/19] quota: make per-sb hash array
2010-10-22 17:34 ` [PATCH 07/19] quota: make per-sb hash array Dmitry Monakhov
@ 2010-10-27 19:31 ` Al Viro
2010-10-28 10:58 ` Dmitry
0 siblings, 1 reply; 26+ messages in thread
From: Al Viro @ 2010-10-27 19:31 UTC (permalink / raw)
To: Dmitry Monakhov; +Cc: linux-fsdevel, jack, hch, Dmitry Monakhov
On Fri, Oct 22, 2010 at 09:34:52PM +0400, Dmitry Monakhov wrote:
> From: Dmitry Monakhov <dmonakhov@gmail.com>
>
> Currently quota_hash[] is global, which is bad for scalability.
> Also is is the last user of global dq_list_lock.
> It is reasonable to introduce dedicated hash for each super_block
> which use quota.
>
> per-sb hash will be allocated only when necessary (on first quota_on())
> Protected by per-sb dq_list_lock.
Ugh... Why not a common hash with per-chain spinlock? We'll waste less
memory on those than on hash chain heads even for a couple of superblocks
and unless you've got boxen with more (quota'd) superblocks than hash chains,
you'll get less contention...
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH 07/19] quota: make per-sb hash array
2010-10-27 19:31 ` Al Viro
@ 2010-10-28 10:58 ` Dmitry
0 siblings, 0 replies; 26+ messages in thread
From: Dmitry @ 2010-10-28 10:58 UTC (permalink / raw)
To: Al Viro; +Cc: linux-fsdevel, jack, hch
On Wed, 27 Oct 2010 20:31:09 +0100, Al Viro <viro@ZenIV.linux.org.uk> wrote:
> On Fri, Oct 22, 2010 at 09:34:52PM +0400, Dmitry Monakhov wrote:
> > From: Dmitry Monakhov <dmonakhov@gmail.com>
> >
> > Currently quota_hash[] is global, which is bad for scalability.
> > Also is is the last user of global dq_list_lock.
> > It is reasonable to introduce dedicated hash for each super_block
> > which use quota.
> >
> > per-sb hash will be allocated only when necessary (on first quota_on())
> > Protected by per-sb dq_list_lock.
>
> Ugh... Why not a common hash with per-chain spinlock? We'll waste less
> memory on those than on hash chain heads even for a couple of superblocks
> and unless you've got boxen with more (quota'd) superblocks than hash chains,
> you'll get less contention...
Ok, i'll use "kernel: add bl_list" per-backet list implementation from nick@.
> --
> To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 26+ messages in thread
* [PATCH 08/19] quota: remove global dq_list_lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (6 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 07/19] quota: make per-sb hash array Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 09/19] quota: rename dq_lock Dmitry Monakhov
` (10 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
dq_list_lock is no longer responsible for any synchronization,
and we may remove it now.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 37 ++-----------------------------------
1 files changed, 2 insertions(+), 35 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 822d7ad..2d08996 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -90,8 +90,8 @@
* about latest values take it as well.
*
* The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
- * dq_list_lock > sb->s_dquot->dq_state_lock
- * dq_list_lock > sb->s_dquot->dq_list_lock
+ * dq_list_lock > dq_state_lock
+ * dq_list_lock > dq_list_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
@@ -127,7 +127,6 @@
* i_mutex on quota files is special (it's below dqio_mutex)
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_fmt_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
@@ -346,7 +345,6 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return 1;
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
list_add(&dquot->dq_dirty,
@@ -354,7 +352,6 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
ret = 0;
}
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
@@ -446,15 +443,12 @@ int dquot_commit(struct dquot *dquot)
struct quota_info *dqopt = sb_dqopts(dquot);
mutex_lock(&dqopt->dqio_mutex);
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
if (!clear_dquot_dirty(dquot)) {
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
goto out_sem;
}
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
@@ -526,7 +520,6 @@ static void invalidate_dquots(struct super_block *sb, int type)
struct quota_info *dqopt = dqopts(sb);
restart:
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
list_for_each_entry_safe(dquot, tmp, &dqopt->dq_inuse_list, dq_inuse) {
if (dquot->dq_sb != sb)
@@ -541,7 +534,6 @@ restart:
prepare_to_wait(&dquot->dq_wait_unused, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
/* Once dqput() wakes us up, we know it's time to free
* the dquot.
* IMPORTANT: we rely on the fact that there is always
@@ -568,7 +560,6 @@ restart:
do_destroy_dquot(dquot);
}
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem */
@@ -582,7 +573,6 @@ int dquot_scan_active(struct super_block *sb,
mutex_lock(&dqctl(sb)->dqonoff_mutex);
dqopt = dqopts(sb);
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
list_for_each_entry(dquot, &dqopt->dq_inuse_list, dq_inuse) {
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
@@ -592,20 +582,17 @@ int dquot_scan_active(struct super_block *sb,
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
ret = fn(dquot, priv);
if (ret < 0)
goto out;
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
/* We are safe to continue now because our dquot could not
* be moved out of the inuse list while we hold the reference */
}
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
out:
dqput(old_dquot);
mutex_unlock(&dqctl(sb)->dqonoff_mutex);
@@ -627,7 +614,6 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
dirty = &dqopt->info[cnt].dqi_dirty_list;
while (!list_empty(dirty)) {
@@ -643,15 +629,12 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqctl(sb)->dq_op->write_dquot(dquot);
dqput(dquot);
spin_lock(&dqopt->dq_list_lock);
- spin_lock(&dq_list_lock);
}
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -731,9 +714,7 @@ static void prune_dqcache(int count)
static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
if (nr) {
- spin_lock(&dq_list_lock);
prune_dqcache(nr);
- spin_unlock(&dq_list_lock);
}
return ((unsigned)
percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
@@ -766,7 +747,6 @@ void dqput(struct dquot *dquot)
dqopt = sb_dqopts(dquot);
dqstats_inc(DQST_DROPS);
we_slept:
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
/* We have more than one user... nothing to do */
@@ -776,13 +756,11 @@ we_slept:
atomic_read(&dquot->dq_count) == 1)
wake_up(&dquot->dq_wait_unused);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
return;
}
/* Need to release dquot? */
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
/* Commit dquot before releasing */
ret = dqctl(dquot->dq_sb)->dq_op->write_dquot(dquot);
if (ret < 0) {
@@ -793,11 +771,9 @@ we_slept:
* We clear dirty bit anyway, so that we avoid
* infinite loop here
*/
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
clear_dquot_dirty(dquot);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
}
goto we_slept;
}
@@ -805,7 +781,6 @@ we_slept:
clear_dquot_dirty(dquot);
if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
dqctl(dquot->dq_sb)->dq_op->release_dquot(dquot);
goto we_slept;
}
@@ -816,7 +791,6 @@ we_slept:
#endif
put_dquot_last(dquot);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
@@ -863,13 +837,11 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
if (!sb_has_quota_active(sb, type))
return NULL;
we_slept:
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
spin_lock(&dqopt->dq_state_lock);
if (!sb_has_quota_active(sb, type)) {
spin_unlock(&dqopt->dq_state_lock);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
goto out;
}
spin_unlock(&dqopt->dq_state_lock);
@@ -878,7 +850,6 @@ we_slept:
if (!dquot) {
if (!empty) {
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
empty = get_empty_dquot(sb, type);
if (!empty)
schedule(); /* Try to wait for a moment... */
@@ -892,14 +863,12 @@ we_slept:
/* hash it first so it can be found */
insert_dquot_hash(dquot);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);
}
@@ -1015,13 +984,11 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
"dq_count %d to dispose list",
atomic_read(&dquot->dq_count));
#endif
- spin_lock(&dq_list_lock);
spin_lock(&dqopt->dq_list_lock);
/* As dquot must have currently users it can't be on
* the free list... */
list_add(&dquot->dq_free, tofree_head);
spin_unlock(&dqopt->dq_list_lock);
- spin_unlock(&dq_list_lock);
return 1;
}
else
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 09/19] quota: rename dq_lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (7 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 08/19] quota: remove global dq_list_lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 10/19] quota: make per-sb dq_data_lock Dmitry Monakhov
` (9 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Give the dquot mutex a more appropriate name.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ocfs2/quota_global.c | 14 +++++++-------
fs/quota/dquot.c | 26 +++++++++++++-------------
fs/quota/quota_tree.c | 2 +-
include/linux/quota.h | 2 +-
4 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index cdae8d1..b464947 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -32,7 +32,7 @@
* Locking of quotas with OCFS2 is rather complex. Here are rules that
* should be obeyed by all the functions:
* - any write of quota structure (either to local or global file) is protected
- * by dqio_mutex or dquot->dq_lock.
+ * by dqio_mutex or dquot->dq_mutex.
* - any modification of global quota file holds inode cluster lock, i_mutex,
* and ip_alloc_sem of the global quota file (achieved by
* ocfs2_lock_global_qf). It also has to hold qinfo_lock.
@@ -47,13 +47,13 @@
* write to gf
* -> write to lf
* Acquire dquot for the first time:
- * dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
+ * dq_mutex -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
* -> alloc space for gf
* -> start_trans -> qinfo_lock -> write to gf
* -> ip_alloc_sem of lf -> alloc space for lf
* -> write to lf
* Release last reference to dquot:
- * dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
+ * dq_mutex -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
* -> write to lf
* Note that all the above operations also hold the inode cluster lock of lf.
* Recovery:
@@ -690,7 +690,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
- mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_mutex);
/* Check whether we are not racing with some other dqget() */
if (atomic_read(&dquot->dq_count) > 1)
goto out;
@@ -723,7 +723,7 @@ out_trans:
out_ilock:
ocfs2_unlock_global_qf(oinfo, 1);
out:
- mutex_unlock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_mutex);
mlog_exit(status);
return status;
}
@@ -746,7 +746,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
handle_t *handle;
mlog_entry("id=%u, type=%d", dquot->dq_id, type);
- mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_mutex);
/*
* We need an exclusive lock, because we're going to update use count
* and instantiate possibly new dquot structure
@@ -810,7 +810,7 @@ out_dq:
goto out;
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
- mutex_unlock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_mutex);
mlog_exit(status);
return status;
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 2d08996..fdaa386 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -106,17 +106,17 @@
* sure they cannot race with quotaon which first sets S_NOQUOTA flag and
* then drops all pointers to dquots from an inode.
*
- * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
- * from inodes (dquot_alloc_space() and such don't check the dq_lock).
+ * Each dquot has its dq_mutex mutex. Locked dquots might not be referenced
+ * from inodes (dquot_alloc_space() and such don't check the dq_mutex).
* Currently dquot is locked only when it is being read to memory (or space for
* it is being allocated) on the first dqget() and when it is being released on
* the last dqput(). The allocation and release oparations are serialized by
- * the dq_lock and by checking the use count in dquot_release(). Write
- * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
+ * the dq_mutex and by checking the use count in dquot_release(). Write
+ * operations on dquots don't hold dq_mutex as they copy data under dq_data_lock
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_mutex >
* dqio_mutex
* The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
* dqptr_sem. But filesystem has to count with the fact that functions such as
@@ -321,8 +321,8 @@ static inline void remove_inuse(struct dquot *dquot)
static void wait_on_dquot(struct dquot *dquot)
{
- mutex_lock(&dquot->dq_lock);
- mutex_unlock(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_mutex);
+ mutex_unlock(&dquot->dq_mutex);
}
static inline int dquot_dirty(struct dquot *dquot)
@@ -404,7 +404,7 @@ int dquot_acquire(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopts(dquot);
- mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_mutex);
mutex_lock(&dqopt->dqio_mutex);
if (!test_bit(DQ_READ_B, &dquot->dq_flags))
ret = dqopt->fmt_ops[dquot->dq_type]->read_dqblk(dquot);
@@ -429,7 +429,7 @@ int dquot_acquire(struct dquot *dquot)
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
mutex_unlock(&dqopt->dqio_mutex);
- mutex_unlock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_mutex);
return ret;
}
EXPORT_SYMBOL(dquot_acquire);
@@ -474,7 +474,7 @@ int dquot_release(struct dquot *dquot)
int ret = 0, ret2 = 0;
struct quota_info *dqopt = sb_dqopts(dquot);
- mutex_lock(&dquot->dq_lock);
+ mutex_lock(&dquot->dq_mutex);
/* Check whether we are not racing with some other dqget() */
if (atomic_read(&dquot->dq_count) > 1)
goto out_dqlock;
@@ -492,7 +492,7 @@ int dquot_release(struct dquot *dquot)
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
- mutex_unlock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_mutex);
return ret;
}
EXPORT_SYMBOL(dquot_release);
@@ -808,7 +808,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
if(!dquot)
return NULL;
- mutex_init(&dquot->dq_lock);
+ mutex_init(&dquot->dq_mutex);
INIT_LIST_HEAD(&dquot->dq_free);
INIT_LIST_HEAD(&dquot->dq_inuse);
INIT_HLIST_NODE(&dquot->dq_hash);
@@ -872,7 +872,7 @@ we_slept:
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);
}
- /* Wait for dq_lock - after this we know that either dquot_release() is
+ /* Wait for dq_mutex - after this we know that either dquot_release() is
* already finished or it will be canceled due to dq_count > 1 test */
wait_on_dquot(dquot);
/* Read the dquot / allocate space in quota file */
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index c0917f4..21a4a6a 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -647,7 +647,7 @@ out:
EXPORT_SYMBOL(qtree_read_dquot);
/* Check whether dquot should not be deleted. We know we are
- * the only one operating on dquot (thanks to dq_lock) */
+ * the only one operating on dquot (thanks to dq_mutex) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
diff --git a/include/linux/quota.h b/include/linux/quota.h
index eaa9f91..754aedb 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -287,7 +287,7 @@ struct dquot {
struct list_head dq_inuse; /* List of all quotas */
struct list_head dq_free; /* Free list element */
struct list_head dq_dirty; /* List of dirty dquots */
- struct mutex dq_lock; /* dquot IO lock */
+ struct mutex dq_mutex; /* dquot IO mutex */
atomic_t dq_count; /* Use count */
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 10/19] quota: make per-sb dq_data_lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (8 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 09/19] quota: rename dq_lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-26 17:38 ` Dmitry
2010-10-22 17:34 ` [PATCH 11/19] quota: protect dquot mem info with object's lock Dmitry Monakhov
` (8 subsequent siblings)
18 siblings, 1 reply; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Currently dq_data_lock is global, which is bad for scalability.
In fact different super_blocks have no shared quota data.
So we may simply convert the global lock to per-sb locks.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ocfs2/quota_global.c | 20 ++++++++--------
fs/ocfs2/quota_local.c | 13 ++++++-----
fs/quota/dquot.c | 54 ++++++++++++++++++++++++----------------------
fs/quota/quota_tree.c | 8 +++---
fs/quota/quota_v2.c | 4 +-
include/linux/quota.h | 3 +-
6 files changed, 52 insertions(+), 50 deletions(-)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index b464947..3e7fda8 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -300,12 +300,12 @@ int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
if (status < 0)
return status;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
if (!oinfo->dqi_gqi_count++)
oinfo->dqi_gqi_bh = bh;
else
WARN_ON(bh != oinfo->dqi_gqi_bh);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
if (ex) {
mutex_lock(&oinfo->dqi_gqinode->i_mutex);
down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
@@ -325,10 +325,10 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
}
ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
brelse(oinfo->dqi_gqi_bh);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
if (!--oinfo->dqi_gqi_count)
oinfo->dqi_gqi_bh = NULL;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
}
/* Read information header from global quota file */
@@ -421,11 +421,11 @@ static int __ocfs2_global_write_info(struct super_block *sb, int type)
struct ocfs2_global_disk_dqinfo dinfo;
ssize_t size;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
@@ -502,7 +502,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
/* Update space and inode usage. Get also other information from
* global quota file so that we don't overwrite any changes there.
* We are */
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
spacechange = dquot->dq_dqb.dqb_curspace -
OCFS2_DQUOT(dquot)->dq_origspace;
inodechange = dquot->dq_dqb.dqb_curinodes -
@@ -556,7 +556,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
err = ocfs2_qinfo_lock(info, freeing);
if (err < 0) {
mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
@@ -835,10 +835,10 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
/* In case user set some limits, sync dquot immediately to global
* quota file so that information propagates quicker */
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
if (dquot->dq_flags & mask)
sync = 1;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
/* This is a slight hack but we can't afford getting global quota
* lock if we already have a transaction started. */
if (!sync || journal_current_handle()) {
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 7c30ba3..2d2e981 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -288,14 +288,15 @@ static void olq_update_info(struct buffer_head *bh, void *private)
struct mem_dqinfo *info = private;
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
struct ocfs2_local_disk_dqinfo *ldinfo;
+ struct quota_info *dqopt = dqopts(oinfo->dqi_gqinode->i_sb);
ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
OCFS2_LOCAL_INFO_OFF);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopt->dq_data_lock);
ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopt->dq_data_lock);
}
static int ocfs2_add_recovery_chunk(struct super_block *sb,
@@ -523,7 +524,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_drop_lock;
}
mutex_lock(&dqopts(sb)->dqio_mutex);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
/* Add usage from quota entry into quota changes
* of our node. Auxiliary variables are important
* due to signedness */
@@ -531,7 +532,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
inodechange = le64_to_cpu(dqblk->dqb_inodemod);
dquot->dq_dqb.dqb_curspace += spacechange;
dquot->dq_dqb.dqb_curinodes += inodechange;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
/* We want to drop reference held by the crashed
* node. Since we have our own reference we know
* global structure actually won't be freed. */
@@ -876,12 +877,12 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
+ ol_dqblk_block_offset(sb, od->dq_local_off));
dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace -
od->dq_origspace);
dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
od->dq_originodes);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
mlog(0, "Writing local dquot %u space %lld inodes %lld\n",
od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod),
(long long)le64_to_cpu(dqblk->dqb_inodemod));
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index fdaa386..6c68172 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -128,8 +128,6 @@
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_fmt_lock);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
-EXPORT_SYMBOL(dq_data_lock);
void __quota_error(struct super_block *sb, const char *func,
const char *fmt, ...)
@@ -1417,8 +1415,11 @@ static void __dquot_initialize(struct inode *inode, int type)
* did a write before quota was turned on
*/
rsv = inode_get_rsv_space(inode);
- if (unlikely(rsv))
- dquot_resv_space(inode->i_dquot[cnt], rsv);
+ if (unlikely(rsv)) {
+ spin_lock(&got[cnt]->dq_lock);
+ dquot_resv_space(got[cnt], rsv);
+ spin_unlock(&got[cnt]->dq_lock);
+ }
}
}
out_err:
@@ -1574,14 +1575,14 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
ret = check_bdq(inode->i_dquot[cnt], number, !warn,
warntype+cnt);
if (ret && !nofail) {
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
goto out_flush_warn;
}
}
@@ -1594,7 +1595,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
dquot_incr_space(inode->i_dquot[cnt], number);
}
inode_incr_space(inode, number, reserve);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
goto out_flush_warn;
@@ -1622,7 +1623,7 @@ int dquot_alloc_inode(const struct inode *inode)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
@@ -1638,7 +1639,7 @@ int dquot_alloc_inode(const struct inode *inode)
}
warn_put_all:
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
@@ -1660,7 +1661,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
}
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt])
@@ -1669,7 +1670,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
}
/* Update inode bytes */
inode_claim_rsv_space(inode, number);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
up_read(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
@@ -1693,7 +1694,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
}
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
@@ -1704,7 +1705,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
dquot_decr_space(inode->i_dquot[cnt], number);
}
inode_decr_space(inode, number, reserve);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
goto out_unlock;
@@ -1729,14 +1730,14 @@ void dquot_free_inode(const struct inode *inode)
return;
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
dquot_decr_inodes(inode->i_dquot[cnt], 1);
}
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&dqctl(inode->i_sb)->dqptr_sem);
@@ -1775,7 +1776,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
up_write(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
}
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
cur_space = inode_get_bytes(inode);
rsv_space = inode_get_rsv_space(inode);
space = cur_space + rsv_space;
@@ -1823,7 +1824,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
inode->i_dquot[cnt] = transfer_to[cnt];
}
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
mark_all_dquot_dirty(transfer_from);
@@ -1837,7 +1838,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
transfer_to[cnt] = transfer_from[cnt];
return 0;
over_quota:
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
flush_warnings(transfer_to, warntype_to);
return ret;
@@ -1958,6 +1959,7 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
mutex_init(&dqopt->dqio_mutex);
spin_lock_init(&dqopt->dq_state_lock);
+ spin_lock_init(&dqopt->dq_data_lock);
spin_lock_init(&dqopt->dq_list_lock);
INIT_LIST_HEAD(&dqopt->dq_inuse_list);
INIT_LIST_HEAD(&dqopt->dq_free_list);
@@ -2415,7 +2417,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
FS_USER_QUOTA : FS_GROUP_QUOTA;
di->d_id = dquot->dq_id;
- spin_lock(&dq_data_lock);
+ spin_lock(&sb_dqopts(dquot)->dq_data_lock);
di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
di->d_ino_hardlimit = dm->dqb_ihardlimit;
@@ -2424,7 +2426,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
di->d_icount = dm->dqb_curinodes;
di->d_btimer = dm->dqb_btime;
di->d_itimer = dm->dqb_itime;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
@@ -2467,7 +2469,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
(di->d_ino_hardlimit > dqi->dqi_maxilimit)))
return -ERANGE;
- spin_lock(&dq_data_lock);
+ spin_lock(&sb_dqopts(dquot)->dq_data_lock);
if (di->d_fieldmask & FS_DQ_BCOUNT) {
dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
check_blim = 1;
@@ -2533,7 +2535,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
mark_dquot_dirty(dquot);
return 0;
@@ -2568,12 +2570,12 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
return -ESRCH;
}
mi = dqopts(sb)->info + type;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
ii->dqi_bgrace = mi->dqi_bgrace;
ii->dqi_igrace = mi->dqi_igrace;
ii->dqi_flags = mi->dqi_flags & DQF_MASK;
ii->dqi_valid = IIF_ALL;
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
mutex_unlock(&dqctl(sb)->dqonoff_mutex);
return 0;
}
@@ -2591,7 +2593,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
goto out;
}
mi = dqopts(sb)->info + type;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
if (ii->dqi_valid & IIF_BGRACE)
mi->dqi_bgrace = ii->dqi_bgrace;
if (ii->dqi_valid & IIF_IGRACE)
@@ -2599,7 +2601,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
if (ii->dqi_valid & IIF_FLAGS)
mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
(ii->dqi_flags & DQF_MASK);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
dqctl(sb)->dq_op->write_info(sb, type);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 21a4a6a..a089c70 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -375,9 +375,9 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
return ret;
}
}
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
@@ -631,14 +631,14 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
kfree(ddquot);
goto out;
}
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
if (!dquot->dq_dqb.dqb_bhardlimit &&
!dquot->dq_dqb.dqb_bsoftlimit &&
!dquot->dq_dqb.dqb_ihardlimit &&
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
kfree(ddquot);
out:
dqstats_inc(DQST_READS);
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 65444d2..e4ef8de 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -153,12 +153,12 @@ static int v2_write_file_info(struct super_block *sb, int type)
struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
ssize_t size;
- spin_lock(&dq_data_lock);
+ spin_lock(&dqopts(sb)->dq_data_lock);
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dqopts(sb)->dq_data_lock);
dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 754aedb..6b04001 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -186,8 +186,6 @@ enum {
typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
typedef long long qsize_t; /* Type in which we store sizes */
-extern spinlock_t dq_data_lock;
-
/* Maximal numbers of writes for quota operation (insert/delete/update)
* (over VFS all formats) */
#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC)
@@ -413,6 +411,7 @@ struct quota_info {
struct mutex dqio_mutex; /* lock device while I/O in progress */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
spinlock_t dq_state_lock; /* serialize quota state changes*/
+ spinlock_t dq_data_lock; /* protect in memory data */
spinlock_t dq_list_lock; /* protect lists and hash*/
struct list_head dq_inuse_list; /* list of inused dquotas */
struct list_head dq_free_list; /* list of free dquotas */
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH 10/19] quota: make per-sb dq_data_lock
2010-10-22 17:34 ` [PATCH 10/19] quota: make per-sb dq_data_lock Dmitry Monakhov
@ 2010-10-26 17:38 ` Dmitry
0 siblings, 0 replies; 26+ messages in thread
From: Dmitry @ 2010-10-26 17:38 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch
On Fri, 22 Oct 2010 21:34:55 +0400, Dmitry Monakhov <dmonakhov@openvz.org> wrote:
> From: Dmitry Monakhov <dmonakhov@gmail.com>
>
> Currently dq_data_lock is global, which is bad for scalability.
> In fact different super_blocks have no shared quota data.
> So we may simply convert global the lock to per-sb locks.
>
> Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
> ---
> fs/ocfs2/quota_global.c | 20 ++++++++--------
> fs/ocfs2/quota_local.c | 13 ++++++-----
> fs/quota/dquot.c | 54 ++++++++++++++++++++++++----------------------
> fs/quota/quota_tree.c | 8 +++---
> fs/quota/quota_v2.c | 4 +-
> include/linux/quota.h | 3 +-
> 6 files changed, 52 insertions(+), 50 deletions(-)
>
> diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
> index b464947..3e7fda8 100644
> --- a/fs/ocfs2/quota_global.c
> +++ b/fs/ocfs2/quota_global.c
> @@ -300,12 +300,12 @@ int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
> status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
> if (status < 0)
> return status;
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
> if (!oinfo->dqi_gqi_count++)
> oinfo->dqi_gqi_bh = bh;
> else
> WARN_ON(bh != oinfo->dqi_gqi_bh);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
> if (ex) {
> mutex_lock(&oinfo->dqi_gqinode->i_mutex);
> down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
> @@ -325,10 +325,10 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
> }
> ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
> brelse(oinfo->dqi_gqi_bh);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
> if (!--oinfo->dqi_gqi_count)
> oinfo->dqi_gqi_bh = NULL;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(oinfo->dqi_gqinode->i_sb)->dq_data_lock);
> }
>
> /* Read information header from global quota file */
> @@ -421,11 +421,11 @@ static int __ocfs2_global_write_info(struct super_block *sb, int type)
> struct ocfs2_global_disk_dqinfo dinfo;
> ssize_t size;
>
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> info->dqi_flags &= ~DQF_INFO_DIRTY;
> dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
> dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
> dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
> dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
> @@ -502,7 +502,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
> /* Update space and inode usage. Get also other information from
> * global quota file so that we don't overwrite any changes there.
> * We are */
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> spacechange = dquot->dq_dqb.dqb_curspace -
> OCFS2_DQUOT(dquot)->dq_origspace;
> inodechange = dquot->dq_dqb.dqb_curinodes -
> @@ -556,7 +556,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
> __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
> OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
> OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> err = ocfs2_qinfo_lock(info, freeing);
> if (err < 0) {
> mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
> @@ -835,10 +835,10 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
>
> /* In case user set some limits, sync dquot immediately to global
> * quota file so that information propagates quicker */
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> if (dquot->dq_flags & mask)
> sync = 1;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> /* This is a slight hack but we can't afford getting global quota
> * lock if we already have a transaction started. */
> if (!sync || journal_current_handle()) {
> diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
> index 7c30ba3..2d2e981 100644
> --- a/fs/ocfs2/quota_local.c
> +++ b/fs/ocfs2/quota_local.c
> @@ -288,14 +288,15 @@ static void olq_update_info(struct buffer_head *bh, void *private)
> struct mem_dqinfo *info = private;
> struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
> struct ocfs2_local_disk_dqinfo *ldinfo;
> + struct quota_info *dqopt = dqopts(oinfo->dqi_gqinode->i_sb);
>
> ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
> OCFS2_LOCAL_INFO_OFF);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopt->dq_data_lock);
> ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
> ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
> ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopt->dq_data_lock);
> }
>
> static int ocfs2_add_recovery_chunk(struct super_block *sb,
> @@ -523,7 +524,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
> goto out_drop_lock;
> }
> mutex_lock(&dqopts(sb)->dqio_mutex);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> /* Add usage from quota entry into quota changes
> * of our node. Auxiliary variables are important
> * due to signedness */
> @@ -531,7 +532,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
> inodechange = le64_to_cpu(dqblk->dqb_inodemod);
> dquot->dq_dqb.dqb_curspace += spacechange;
> dquot->dq_dqb.dqb_curinodes += inodechange;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> /* We want to drop reference held by the crashed
> * node. Since we have our own reference we know
> * global structure actually won't be freed. */
> @@ -876,12 +877,12 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
> + ol_dqblk_block_offset(sb, od->dq_local_off));
>
> dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace -
> od->dq_origspace);
> dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
> od->dq_originodes);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> mlog(0, "Writing local dquot %u space %lld inodes %lld\n",
> od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod),
> (long long)le64_to_cpu(dqblk->dqb_inodemod));
> diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
> index fdaa386..6c68172 100644
> --- a/fs/quota/dquot.c
> +++ b/fs/quota/dquot.c
> @@ -128,8 +128,6 @@
> */
>
> static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_fmt_lock);
> -__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
> -EXPORT_SYMBOL(dq_data_lock);
>
> void __quota_error(struct super_block *sb, const char *func,
> const char *fmt, ...)
> @@ -1417,8 +1415,11 @@ static void __dquot_initialize(struct inode *inode, int type)
> * did a write before quota was turned on
> */
> rsv = inode_get_rsv_space(inode);
> - if (unlikely(rsv))
> - dquot_resv_space(inode->i_dquot[cnt], rsv);
> + if (unlikely(rsv)) {
> + spin_lock(&got[cnt]->dq_lock);
> + dquot_resv_space(got[cnt], rsv);
> + spin_unlock(&got[cnt]->dq_lock);
> + }
Ohhh, sorry. got[cnt] is already NULL at this moment.
Of course this hunk should look as follows:
+ if (unlikely(rsv)) {
+ spin_lock(&inode->i_dquot[cnt]->dq_lock);
+ dquot_resv_space(inode->i_dquot[cnt], rsv);
+ spin_unlock(&inode->i_dquot[cnt]->dq_lock);
> }
> }
> out_err:
> @@ -1574,14 +1575,14 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
> for (cnt = 0; cnt < MAXQUOTAS; cnt++)
> warntype[cnt] = QUOTA_NL_NOWARN;
>
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
> if (!inode->i_dquot[cnt])
> continue;
> ret = check_bdq(inode->i_dquot[cnt], number, !warn,
> warntype+cnt);
> if (ret && !nofail) {
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> goto out_flush_warn;
> }
> }
> @@ -1594,7 +1595,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
> dquot_incr_space(inode->i_dquot[cnt], number);
> }
> inode_incr_space(inode, number, reserve);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
>
> if (reserve)
> goto out_flush_warn;
> @@ -1622,7 +1623,7 @@ int dquot_alloc_inode(const struct inode *inode)
> for (cnt = 0; cnt < MAXQUOTAS; cnt++)
> warntype[cnt] = QUOTA_NL_NOWARN;
> down_read(&dqctl(inode->i_sb)->dqptr_sem);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
> if (!inode->i_dquot[cnt])
> continue;
> @@ -1638,7 +1639,7 @@ int dquot_alloc_inode(const struct inode *inode)
> }
>
> warn_put_all:
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> if (ret == 0)
> mark_all_dquot_dirty(inode->i_dquot);
> flush_warnings(inode->i_dquot, warntype);
> @@ -1660,7 +1661,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
> }
>
> down_read(&dqctl(inode->i_sb)->dqptr_sem);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> /* Claim reserved quotas to allocated quotas */
> for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
> if (inode->i_dquot[cnt])
> @@ -1669,7 +1670,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
> }
> /* Update inode bytes */
> inode_claim_rsv_space(inode, number);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> mark_all_dquot_dirty(inode->i_dquot);
> up_read(&dqctl(inode->i_sb)->dqptr_sem);
> return 0;
> @@ -1693,7 +1694,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
> }
>
> down_read(&dqctl(inode->i_sb)->dqptr_sem);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
> if (!inode->i_dquot[cnt])
> continue;
> @@ -1704,7 +1705,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
> dquot_decr_space(inode->i_dquot[cnt], number);
> }
> inode_decr_space(inode, number, reserve);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
>
> if (reserve)
> goto out_unlock;
> @@ -1729,14 +1730,14 @@ void dquot_free_inode(const struct inode *inode)
> return;
>
> down_read(&dqctl(inode->i_sb)->dqptr_sem);
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
> if (!inode->i_dquot[cnt])
> continue;
> warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
> dquot_decr_inodes(inode->i_dquot[cnt], 1);
> }
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> mark_all_dquot_dirty(inode->i_dquot);
> flush_warnings(inode->i_dquot, warntype);
> up_read(&dqctl(inode->i_sb)->dqptr_sem);
> @@ -1775,7 +1776,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
> up_write(&dqctl(inode->i_sb)->dqptr_sem);
> return 0;
> }
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
> cur_space = inode_get_bytes(inode);
> rsv_space = inode_get_rsv_space(inode);
> space = cur_space + rsv_space;
> @@ -1823,7 +1824,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
>
> inode->i_dquot[cnt] = transfer_to[cnt];
> }
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> up_write(&dqctl(inode->i_sb)->dqptr_sem);
>
> mark_all_dquot_dirty(transfer_from);
> @@ -1837,7 +1838,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
> transfer_to[cnt] = transfer_from[cnt];
> return 0;
> over_quota:
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
> up_write(&dqctl(inode->i_sb)->dqptr_sem);
> flush_warnings(transfer_to, warntype_to);
> return ret;
> @@ -1958,6 +1959,7 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
>
> mutex_init(&dqopt->dqio_mutex);
> spin_lock_init(&dqopt->dq_state_lock);
> + spin_lock_init(&dqopt->dq_data_lock);
> spin_lock_init(&dqopt->dq_list_lock);
> INIT_LIST_HEAD(&dqopt->dq_inuse_list);
> INIT_LIST_HEAD(&dqopt->dq_free_list);
> @@ -2415,7 +2417,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
> FS_USER_QUOTA : FS_GROUP_QUOTA;
> di->d_id = dquot->dq_id;
>
> - spin_lock(&dq_data_lock);
> + spin_lock(&sb_dqopts(dquot)->dq_data_lock);
> di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
> di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
> di->d_ino_hardlimit = dm->dqb_ihardlimit;
> @@ -2424,7 +2426,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
> di->d_icount = dm->dqb_curinodes;
> di->d_btimer = dm->dqb_btime;
> di->d_itimer = dm->dqb_itime;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
> }
>
> int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
> @@ -2467,7 +2469,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
> (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
> return -ERANGE;
>
> - spin_lock(&dq_data_lock);
> + spin_lock(&sb_dqopts(dquot)->dq_data_lock);
> if (di->d_fieldmask & FS_DQ_BCOUNT) {
> dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
> check_blim = 1;
> @@ -2533,7 +2535,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
> clear_bit(DQ_FAKE_B, &dquot->dq_flags);
> else
> set_bit(DQ_FAKE_B, &dquot->dq_flags);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
> mark_dquot_dirty(dquot);
>
> return 0;
> @@ -2568,12 +2570,12 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
> return -ESRCH;
> }
> mi = dqopts(sb)->info + type;
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> ii->dqi_bgrace = mi->dqi_bgrace;
> ii->dqi_igrace = mi->dqi_igrace;
> ii->dqi_flags = mi->dqi_flags & DQF_MASK;
> ii->dqi_valid = IIF_ALL;
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> mutex_unlock(&dqctl(sb)->dqonoff_mutex);
> return 0;
> }
> @@ -2591,7 +2593,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
> goto out;
> }
> mi = dqopts(sb)->info + type;
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> if (ii->dqi_valid & IIF_BGRACE)
> mi->dqi_bgrace = ii->dqi_bgrace;
> if (ii->dqi_valid & IIF_IGRACE)
> @@ -2599,7 +2601,7 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
> if (ii->dqi_valid & IIF_FLAGS)
> mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
> (ii->dqi_flags & DQF_MASK);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> mark_info_dirty(sb, type);
> /* Force write to disk */
> dqctl(sb)->dq_op->write_info(sb, type);
> diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
> index 21a4a6a..a089c70 100644
> --- a/fs/quota/quota_tree.c
> +++ b/fs/quota/quota_tree.c
> @@ -375,9 +375,9 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
> return ret;
> }
> }
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
> dquot->dq_off);
> if (ret != info->dqi_entry_size) {
> @@ -631,14 +631,14 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
> kfree(ddquot);
> goto out;
> }
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
> if (!dquot->dq_dqb.dqb_bhardlimit &&
> !dquot->dq_dqb.dqb_bsoftlimit &&
> !dquot->dq_dqb.dqb_ihardlimit &&
> !dquot->dq_dqb.dqb_isoftlimit)
> set_bit(DQ_FAKE_B, &dquot->dq_flags);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> kfree(ddquot);
> out:
> dqstats_inc(DQST_READS);
> diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
> index 65444d2..e4ef8de 100644
> --- a/fs/quota/quota_v2.c
> +++ b/fs/quota/quota_v2.c
> @@ -153,12 +153,12 @@ static int v2_write_file_info(struct super_block *sb, int type)
> struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
> ssize_t size;
>
> - spin_lock(&dq_data_lock);
> + spin_lock(&dqopts(sb)->dq_data_lock);
> info->dqi_flags &= ~DQF_INFO_DIRTY;
> dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
> dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
> dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
> - spin_unlock(&dq_data_lock);
> + spin_unlock(&dqopts(sb)->dq_data_lock);
> dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
> dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
> dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
> diff --git a/include/linux/quota.h b/include/linux/quota.h
> index 754aedb..6b04001 100644
> --- a/include/linux/quota.h
> +++ b/include/linux/quota.h
> @@ -186,8 +186,6 @@ enum {
> typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
> typedef long long qsize_t; /* Type in which we store sizes */
>
> -extern spinlock_t dq_data_lock;
> -
> /* Maximal numbers of writes for quota operation (insert/delete/update)
> * (over VFS all formats) */
> #define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC)
> @@ -413,6 +411,7 @@ struct quota_info {
> struct mutex dqio_mutex; /* lock device while I/O in progress */
> struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
> spinlock_t dq_state_lock; /* serialize quota state changes*/
> + spinlock_t dq_data_lock; /* protect in memory data */
> spinlock_t dq_list_lock; /* protect lists and hash*/
> struct list_head dq_inuse_list; /* list of inused dquotas */
> struct list_head dq_free_list; /* list of free dquotas */
> --
> 1.6.5.2
>
^ permalink raw reply [flat|nested] 26+ messages in thread
* [PATCH 11/19] quota: protect dquot mem info with object's lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (9 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 10/19] quota: make per-sb dq_data_lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 12/19] quota: drop dq_data_lock where possible Dmitry Monakhov
` (7 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
currently ->dq_data_lock is responsible for protecting three things
1) dquot->dq_dqb info consistency
2) synchronization between ->dq_dqb with ->i_bytes
3) Protects mem_dqinfo (per-sb data),
3b) and consistency between mem_dqinfo and dq_dqb for following data.
dqi_bgrace <=> dqb_btime
dqi_igrace <=> dqb_itime
In fact (1) and (2) are conceptually different from (3).
By introducing a per-dquot data lock, we can later split (1)(2) from (3).
This patch simply introduces a new lock, without changing ->dq_data_lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ocfs2/quota_global.c | 4 ++
fs/ocfs2/quota_local.c | 4 ++
fs/quota/dquot.c | 109 ++++++++++++++++++++++++++++++++++++++++++-----
fs/quota/quota_tree.c | 4 ++
include/linux/quota.h | 13 ++++++
5 files changed, 123 insertions(+), 11 deletions(-)
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3e7fda8..b768588 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -503,6 +503,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
* global quota file so that we don't overwrite any changes there.
* We are */
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
spacechange = dquot->dq_dqb.dqb_curspace -
OCFS2_DQUOT(dquot)->dq_origspace;
inodechange = dquot->dq_dqb.dqb_curinodes -
@@ -556,6 +557,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
err = ocfs2_qinfo_lock(info, freeing);
if (err < 0) {
@@ -836,8 +838,10 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
/* In case user set some limits, sync dquot immediately to global
* quota file so that information propagates quicker */
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
if (dquot->dq_flags & mask)
sync = 1;
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
/* This is a slight hack but we can't afford getting global quota
* lock if we already have a transaction started. */
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2d2e981..1490cb0 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -525,6 +525,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
}
mutex_lock(&dqopts(sb)->dqio_mutex);
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
/* Add usage from quota entry into quota changes
* of our node. Auxiliary variables are important
* due to signedness */
@@ -532,6 +533,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
inodechange = le64_to_cpu(dqblk->dqb_inodemod);
dquot->dq_dqb.dqb_curspace += spacechange;
dquot->dq_dqb.dqb_curinodes += inodechange;
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
/* We want to drop reference held by the crashed
* node. Since we have our own reference we know
@@ -878,10 +880,12 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id);
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&od->dq_dquot.dq_lock);
dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace -
od->dq_origspace);
dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes -
od->dq_originodes);
+ spin_unlock(&od->dq_dquot.dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
mlog(0, "Writing local dquot %u space %lld inodes %lld\n",
od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod),
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 6c68172..f100fb4 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -82,16 +82,18 @@
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
- * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
- * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * dq_data_lock protects mem_dqinfo structures and mem_dqinfo with
+ * dq_dqb consystency.
+ * dq_lock protects dquot->dq_dqb and also guards consistency of
+ * dquot->dq_dqb with inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
* in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
* modifications of quota state (on quotaon and quotaoff) and readers who care
* about latest values take it as well.
*
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * The spinlock ordering is hence:
+ * dq_data_lock > dq_lock > dq_list_lock > i_lock,
* dq_list_lock > dq_state_lock
- * dq_list_lock > dq_list_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
@@ -147,6 +149,9 @@ EXPORT_SYMBOL(__quota_error);
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
+#define ASSERT_SPIN_LOCKED(lk) assert_spin_locked(lk)
+#else
+#define ASSERT_SPIN_LOCKED(lk)
#endif
static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
@@ -378,6 +383,56 @@ static inline void dqput_all(struct dquot **dquot)
dqput(dquot[cnt]);
}
+static void __lock_dquot_double(struct dquot * const dq1,
+ struct dquot * const dq2)
+{
+ if(dq1 < dq2) {
+ spin_lock_nested(&dq1->dq_lock, DQUOT_LOCK_CLASS(dq1));
+ spin_lock_nested(&dq2->dq_lock, DQUOT_LOCK_CLASS_NESTED(dq2));
+ } else {
+ spin_lock_nested(&dq2->dq_lock, DQUOT_LOCK_CLASS(dq2));
+ spin_lock_nested(&dq1->dq_lock, DQUOT_LOCK_CLASS_NESTED(dq1));
+ }
+}
+
+/* This is strange, but all conditions are possible */
+static inline void lock_dquot_double(struct dquot * const *dq1,
+ struct dquot * const *dq2)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dq1[cnt]) {
+ if (likely(dq2[cnt]))
+ __lock_dquot_double(dq1[cnt], dq2[cnt]);
+ else
+ spin_lock_nested(&dq1[cnt]->dq_lock,
+ DQUOT_LOCK_CLASS(dq1[cnt]));
+ } else {
+ if (unlikely(dq2[cnt]))
+ spin_lock_nested(&dq2[cnt]->dq_lock,
+ DQUOT_LOCK_CLASS(dq2[cnt]));
+ }
+ }
+}
+static inline void lock_inode_dquots(struct dquot * const *dquot)
+{
+ unsigned int cnt;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (dquot[cnt])
+ spin_lock_nested(&dquot[cnt]->dq_lock,
+ DQUOT_LOCK_CLASS(dquot[cnt]));
+}
+
+static inline void unlock_inode_dquots(struct dquot * const *dquot)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (dquot[cnt])
+ spin_unlock(&dquot[cnt]->dq_lock);
+}
+
/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
@@ -812,6 +867,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
INIT_HLIST_NODE(&dquot->dq_hash);
INIT_LIST_HEAD(&dquot->dq_dirty);
init_waitqueue_head(&dquot->dq_wait_unused);
+ spin_lock_init(&dquot->dq_lock);
dquot->dq_sb = sb;
dquot->dq_type = type;
atomic_set(&dquot->dq_count, 1);
@@ -1060,16 +1116,19 @@ static void drop_dquot_ref(struct super_block *sb, int type)
static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
dquot->dq_dqb.dqb_curinodes += number;
}
static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
dquot->dq_dqb.dqb_curspace += number;
}
static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
dquot->dq_dqb.dqb_rsvspace += number;
}
@@ -1078,6 +1137,7 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
*/
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
if (dquot->dq_dqb.dqb_rsvspace < number) {
WARN_ON_ONCE(1);
number = dquot->dq_dqb.dqb_rsvspace;
@@ -1089,6 +1149,7 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
if (dquot->dq_dqb.dqb_rsvspace >= number)
dquot->dq_dqb.dqb_rsvspace -= number;
else {
@@ -1099,6 +1160,7 @@ void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
if (dqctl(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
dquot->dq_dqb.dqb_curinodes -= number;
@@ -1111,6 +1173,7 @@ static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
+ ASSERT_SPIN_LOCKED(&dquot->dq_lock);
if (dqctl(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
dquot->dq_dqb.dqb_curspace -= number;
@@ -1228,7 +1291,7 @@ static int ignore_hardlimit(struct dquot *dquot)
!(info->dqi_flags & V1_DQF_RSQUASH));
}
-/* needs dq_data_lock */
+/* needs dq_data_lock, ->dq_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
@@ -1265,7 +1328,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
return 0;
}
-/* needs dq_data_lock */
+/* needs dq_data_lock, ->dq_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
qsize_t tspace;
@@ -1576,12 +1639,14 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
warntype[cnt] = QUOTA_NL_NOWARN;
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
+ lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
ret = check_bdq(inode->i_dquot[cnt], number, !warn,
- warntype+cnt);
+ warntype + cnt);
if (ret && !nofail) {
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
goto out_flush_warn;
}
@@ -1595,6 +1660,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
dquot_incr_space(inode->i_dquot[cnt], number);
}
inode_incr_space(inode, number, reserve);
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
@@ -1624,6 +1690,7 @@ int dquot_alloc_inode(const struct inode *inode)
warntype[cnt] = QUOTA_NL_NOWARN;
down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
+ lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
@@ -1639,6 +1706,7 @@ int dquot_alloc_inode(const struct inode *inode)
}
warn_put_all:
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
@@ -1662,6 +1730,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
+ lock_inode_dquots(inode->i_dquot);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt])
@@ -1670,6 +1739,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
}
/* Update inode bytes */
inode_claim_rsv_space(inode, number);
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
up_read(&dqctl(inode->i_sb)->dqptr_sem);
@@ -1695,6 +1765,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
+ lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
@@ -1705,6 +1776,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
dquot_decr_space(inode->i_dquot[cnt], number);
}
inode_decr_space(inode, number, reserve);
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
@@ -1731,12 +1803,14 @@ void dquot_free_inode(const struct inode *inode)
down_read(&dqctl(inode->i_sb)->dqptr_sem);
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
+ lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
dquot_decr_inodes(inode->i_dquot[cnt], 1);
}
+ unlock_inode_dquots(inode->i_dquot);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
@@ -1777,10 +1851,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
return 0;
}
spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
- cur_space = inode_get_bytes(inode);
- rsv_space = inode_get_rsv_space(inode);
- space = cur_space + rsv_space;
- /* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
/*
* Skip changes for same uid or gid or for turned off quota-type.
@@ -1792,6 +1862,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
continue;
is_valid[cnt] = 1;
transfer_from[cnt] = inode->i_dquot[cnt];
+ }
+ lock_dquot_double(transfer_from, transfer_to);
+ cur_space = inode_get_bytes(inode);
+ rsv_space = inode_get_rsv_space(inode);
+ space = cur_space + rsv_space;
+ /* Build the transfer_from list and check the limits */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (!is_valid[cnt])
+ continue;
ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
if (ret)
goto over_quota;
@@ -1824,6 +1903,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
inode->i_dquot[cnt] = transfer_to[cnt];
}
+ unlock_inode_dquots(transfer_to);
+ unlock_inode_dquots(transfer_from);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
@@ -1838,6 +1919,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
transfer_to[cnt] = transfer_from[cnt];
return 0;
over_quota:
+ unlock_inode_dquots(transfer_to);
+ unlock_inode_dquots(transfer_from);
spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
flush_warnings(transfer_to, warntype_to);
@@ -2418,6 +2501,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
di->d_id = dquot->dq_id;
spin_lock(&sb_dqopts(dquot)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
di->d_ino_hardlimit = dm->dqb_ihardlimit;
@@ -2426,6 +2510,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
di->d_icount = dm->dqb_curinodes;
di->d_btimer = dm->dqb_btime;
di->d_itimer = dm->dqb_itime;
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
}
@@ -2470,6 +2555,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
return -ERANGE;
spin_lock(&sb_dqopts(dquot)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
if (di->d_fieldmask & FS_DQ_BCOUNT) {
dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
check_blim = 1;
@@ -2535,6 +2621,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
mark_dquot_dirty(dquot);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index a089c70..e6307f6 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -376,7 +376,9 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
}
}
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
@@ -632,12 +634,14 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
goto out;
}
spin_lock(&dqopts(sb)->dq_data_lock);
+ spin_lock(&dquot->dq_lock);
info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
if (!dquot->dq_dqb.dqb_bhardlimit &&
!dquot->dq_dqb.dqb_bsoftlimit &&
!dquot->dq_dqb.dqb_ihardlimit &&
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ spin_unlock(&dquot->dq_lock);
spin_unlock(&dqopts(sb)->dq_data_lock);
kfree(ddquot);
out:
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 6b04001..c40fd80 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -279,6 +279,18 @@ static inline void dqstats_dec(unsigned int type)
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
* clear them when it sees fit. */
+/*
+ * To make lock_dep happy we have to place different dquot types to
+ * different lock classes.
+*/
+enum dquot_lock_class
+{
+ DQUOT_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+ DQUOT_LOCK_NESTED
+};
+#define DQUOT_LOCK_CLASS(dquot) (DQUOT_LOCK_NORMAL + (dquot)->dq_type * 2)
+#define DQUOT_LOCK_CLASS_NESTED(dquot) (DQUOT_LOCK_NESTED + \
+ (dquot)->dq_type * 2)
struct dquot {
struct hlist_node dq_hash; /* Hash list in memory */
@@ -294,6 +306,7 @@ struct dquot {
unsigned long dq_flags; /* See DQ_* */
short dq_type; /* Type of quota */
struct mem_dqblk dq_dqb; /* Diskquota usage */
+ spinlock_t dq_lock; /* protect in mem_dqblk */
};
/* Operations which must be implemented by each quota format */
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 12/19] quota: drop dq_data_lock where possible
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (10 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 11/19] quota: protect dquot mem info with object's lock Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 13/19] quota: relax dq_data_lock dq_lock locking consistency Dmitry Monakhov
` (6 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
dq_data_lock is no longer responsible for dquot data protection.
FIXME: I've skipped ocfs2 code.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 2 --
fs/quota/quota_tree.c | 4 ----
2 files changed, 0 insertions(+), 6 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index f100fb4..b8fcfcd 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2500,7 +2500,6 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
FS_USER_QUOTA : FS_GROUP_QUOTA;
di->d_id = dquot->dq_id;
- spin_lock(&sb_dqopts(dquot)->dq_data_lock);
spin_lock(&dquot->dq_lock);
di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
@@ -2511,7 +2510,6 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
di->d_btimer = dm->dqb_btime;
di->d_itimer = dm->dqb_itime;
spin_unlock(&dquot->dq_lock);
- spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index e6307f6..3af6d89 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -375,11 +375,9 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
return ret;
}
}
- spin_lock(&dqopts(sb)->dq_data_lock);
spin_lock(&dquot->dq_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dquot->dq_lock);
- spin_unlock(&dqopts(sb)->dq_data_lock);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
@@ -633,7 +631,6 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
kfree(ddquot);
goto out;
}
- spin_lock(&dqopts(sb)->dq_data_lock);
spin_lock(&dquot->dq_lock);
info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
if (!dquot->dq_dqb.dqb_bhardlimit &&
@@ -642,7 +639,6 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_lock);
- spin_unlock(&dqopts(sb)->dq_data_lock);
kfree(ddquot);
out:
dqstats_inc(DQST_READS);
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 13/19] quota: relax dq_data_lock dq_lock locking consistency
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (11 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 12/19] quota: drop dq_data_lock where possible Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:34 ` [PATCH 14/19] quota: protect dqget() from parallels quotaoff via RCU Dmitry Monakhov
` (5 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
Consistency between mem_info and dq_dqb is weak because we just copy
data from dqi_{bi}grace to dqb_{bi}time. So we protect dqb_{bi}time from
races with quota_ctl call.
Nothing actually happens if we relax this consistency requirement.
Since dqi_{bi}grace is an (int), it is possible to read it atomically without a lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 16 ----------------
include/linux/quota.h | 2 ++
2 files changed, 2 insertions(+), 16 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index b8fcfcd..771aaab 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1638,7 +1638,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1647,7 +1646,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
warntype + cnt);
if (ret && !nofail) {
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
goto out_flush_warn;
}
}
@@ -1661,7 +1659,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
}
inode_incr_space(inode, number, reserve);
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
goto out_flush_warn;
@@ -1689,7 +1686,6 @@ int dquot_alloc_inode(const struct inode *inode)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1707,7 +1703,6 @@ int dquot_alloc_inode(const struct inode *inode)
warn_put_all:
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
@@ -1729,7 +1724,6 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
}
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
lock_inode_dquots(inode->i_dquot);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1740,7 +1734,6 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
/* Update inode bytes */
inode_claim_rsv_space(inode, number);
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
up_read(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
@@ -1764,7 +1757,6 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
}
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1777,7 +1769,6 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
}
inode_decr_space(inode, number, reserve);
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
if (reserve)
goto out_unlock;
@@ -1802,7 +1793,6 @@ void dquot_free_inode(const struct inode *inode)
return;
down_read(&dqctl(inode->i_sb)->dqptr_sem);
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1811,7 +1801,6 @@ void dquot_free_inode(const struct inode *inode)
dquot_decr_inodes(inode->i_dquot[cnt], 1);
}
unlock_inode_dquots(inode->i_dquot);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&dqctl(inode->i_sb)->dqptr_sem);
@@ -1850,7 +1839,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
up_write(&dqctl(inode->i_sb)->dqptr_sem);
return 0;
}
- spin_lock(&dqopts(inode->i_sb)->dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
/*
* Skip changes for same uid or gid or for turned off quota-type.
@@ -1905,7 +1893,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
}
unlock_inode_dquots(transfer_to);
unlock_inode_dquots(transfer_from);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
mark_all_dquot_dirty(transfer_from);
@@ -1921,7 +1908,6 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
over_quota:
unlock_inode_dquots(transfer_to);
unlock_inode_dquots(transfer_from);
- spin_unlock(&dqopts(inode->i_sb)->dq_data_lock);
up_write(&dqctl(inode->i_sb)->dqptr_sem);
flush_warnings(transfer_to, warntype_to);
return ret;
@@ -2552,7 +2538,6 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
(di->d_ino_hardlimit > dqi->dqi_maxilimit)))
return -ERANGE;
- spin_lock(&sb_dqopts(dquot)->dq_data_lock);
spin_lock(&dquot->dq_lock);
if (di->d_fieldmask & FS_DQ_BCOUNT) {
dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
@@ -2620,7 +2605,6 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_lock);
- spin_unlock(&sb_dqopts(dquot)->dq_data_lock);
mark_dquot_dirty(dquot);
return 0;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index c40fd80..867848b 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -219,6 +219,8 @@ struct mem_dqinfo {
* quotas on after remount RW */
struct list_head dqi_dirty_list; /* List of dirty dquots */
unsigned long dqi_flags;
+ /* Readers are allowed to read following two variables without
+ ->dq_data_lock held */
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
qsize_t dqi_maxblimit;
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 14/19] quota: protect dqget() from parallels quotaoff via RCU
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (12 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 13/19] quota: relax dq_data_lock dq_lock locking consistency Dmitry Monakhov
@ 2010-10-22 17:34 ` Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 15/19] quota: remove dq_state_lock Dmitry Monakhov
` (4 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:34 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
This allows us to remove dq_state_lock from the dqget quota state test.
synchronize_rcu() in dquot_disable() is a somewhat strange usage of RCU,
but rcu_read_lock will be used in dqget() more properly later to
make the lookup lock-less.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 32 ++++++++++++++++++++++----------
1 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 771aaab..0d755ef 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -878,7 +878,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
/*
* Get reference to dquot
*
- * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * We are guarded from parallel quotaoff() by holding rcu_read_lock
* destroying our dquot by:
* a) checking for quota flags under dq_list_lock and
* b) getting a reference to dquot before we release dq_list_lock
@@ -888,17 +888,15 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
struct dquot *dquot = NULL, *empty = NULL;
struct quota_info *dqopt = dqopts(sb);
- if (!sb_has_quota_active(sb, type))
- return NULL;
we_slept:
- spin_lock(&dqopt->dq_list_lock);
- spin_lock(&dqopt->dq_state_lock);
+ rcu_read_lock();
if (!sb_has_quota_active(sb, type)) {
- spin_unlock(&dqopt->dq_state_lock);
- spin_unlock(&dqopt->dq_list_lock);
- goto out;
+ rcu_read_unlock();
+ return NULL;
}
- spin_unlock(&dqopt->dq_state_lock);
+ spin_lock(&dqopt->dq_list_lock);
+ /* XXX: Currently RCU used only for synchronization with quotaoff */
+ rcu_read_unlock();
dquot = find_dquot(sb, id, type);
if (!dquot) {
@@ -2113,6 +2111,21 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
continue;
+ toputinode[cnt] = dqopt->files[cnt];
+ }
+ /*
+ * Wait for all dqget() callers to finish.
+ */
+ synchronize_rcu();
+
+ /*
+ * At this moment all quota functions disabled, is is now safe to
+ * perform final cleanup.
+ */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+
+ if (!toputinode[cnt])
+ continue;
/* Note: these are blocking operations */
drop_dquot_ref(sb, cnt);
invalidate_dquots(sb, cnt);
@@ -2126,7 +2139,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
dqopt->fmt_ops[cnt]->free_file_info(sb, cnt);
put_quota_format(dqopt->info[cnt].dqi_format);
- toputinode[cnt] = dqopt->files[cnt];
if (!sb_has_quota_loaded(sb, cnt))
dqopt->files[cnt] = NULL;
dqopt->info[cnt].dqi_flags = 0;
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 15/19] quota: remove dq_state_lock
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (13 preceding siblings ...)
2010-10-22 17:34 ` [PATCH 14/19] quota: protect dqget() from parallels quotaoff via RCU Dmitry Monakhov
@ 2010-10-22 17:35 ` Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 16/19] fs: add unlocked helpers Dmitry Monakhov
` (3 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:35 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
dqget() is the only reader which uses dq_state_lock; in fact, locking is not
necessary for all readers. All writers are already serialized by
dqonoff_mutex. We can safely remove dq_state_lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 19 ++-----------------
1 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0d755ef..9779800 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -87,13 +87,10 @@
* dq_lock protects dquot->dq_dqb and also guards consistency of
* dquot->dq_dqb with inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
- * modifications of quota state (on quotaon and quotaoff) and readers who care
- * about latest values take it as well.
+ * in inode_add_bytes() and inode_sub_bytes().
*
* The spinlock ordering is hence:
- * dq_data_lock > dq_lock > dq_list_lock > i_lock,
- * dq_list_lock > dq_state_lock
+ * dq_data_lock > dq_lock > dq_list_lock > i_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
@@ -2025,7 +2022,6 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
return err;
mutex_init(&dqopt->dqio_mutex);
- spin_lock_init(&dqopt->dq_state_lock);
spin_lock_init(&dqopt->dq_data_lock);
spin_lock_init(&dqopt->dq_list_lock);
INIT_LIST_HEAD(&dqopt->dq_inuse_list);
@@ -2087,24 +2083,19 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
continue;
if (flags & DQUOT_SUSPENDED) {
- spin_lock(&dqopt->dq_state_lock);
qctl->flags |=
dquot_state_flag(DQUOT_SUSPENDED, cnt);
- spin_unlock(&dqopt->dq_state_lock);
} else {
- spin_lock(&dqopt->dq_state_lock);
qctl->flags &= ~dquot_state_flag(flags, cnt);
/* Turning off suspended quotas? */
if (!sb_has_quota_loaded(sb, cnt) &&
sb_has_quota_suspended(sb, cnt)) {
qctl->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
- spin_unlock(&dqopt->dq_state_lock);
iput(dqopt->files[cnt]);
dqopt->files[cnt] = NULL;
continue;
}
- spin_unlock(&dqopt->dq_state_lock);
}
/* We still have to keep quota loaded? */
@@ -2301,9 +2292,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
goto out_file_init;
}
mutex_unlock(&dqopt->dqio_mutex);
- spin_lock(&dqopt->dq_state_lock);
dqctl(sb)->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dqopt->dq_state_lock);
add_dquot_ref(sb, type);
mutex_unlock(&dqctl(sb)->dqonoff_mutex);
@@ -2348,12 +2337,10 @@ int dquot_resume(struct super_block *sb, int type)
}
inode = qctl->dq_opt->files[cnt];
qctl->dq_opt->files[cnt] = NULL;
- spin_lock(&dqopts(sb)->dq_state_lock);
flags = qctl->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
cnt);
qctl->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
- spin_unlock(&dqopts(sb)->dq_state_lock);
mutex_unlock(&qctl->dqonoff_mutex);
flags = dquot_generic_flag(flags, cnt);
@@ -2432,9 +2419,7 @@ int dquot_enable(struct inode *inode, int type, int format_id,
ret = -EBUSY;
goto out_lock;
}
- spin_lock(&dqopts(sb)->dq_state_lock);
qctl->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dqopts(sb)->dq_state_lock);
out_lock:
mutex_unlock(&qctl->dqonoff_mutex);
return ret;
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 16/19] fs: add unlocked helpers
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (14 preceding siblings ...)
2010-10-22 17:35 ` [PATCH 15/19] quota: remove dq_state_lock Dmitry Monakhov
@ 2010-10-22 17:35 ` Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 17/19] quota: Some stylistic cleanup for dquot interface Dmitry Monakhov
` (2 subsequent siblings)
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:35 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
inode_{add,sub}_bytes will be used by dquot code
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 50 ++++++++++++++++++++++++++++++++++++++------------
fs/stat.c | 15 ++++++++++++---
include/linux/fs.h | 2 ++
3 files changed, 52 insertions(+), 15 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9779800..1c7eea1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -240,6 +240,7 @@ struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
static qsize_t inode_get_rsv_space(struct inode *inode);
+static qsize_t __inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);
static inline unsigned int
@@ -1553,11 +1554,17 @@ void inode_add_rsv_space(struct inode *inode, qsize_t number)
}
EXPORT_SYMBOL(inode_add_rsv_space);
-void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+inline void __inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
- spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
+
+}
+
+void inode_claim_rsv_space(struct inode *inode, qsize_t number)
+{
+ spin_lock(&inode->i_lock);
+ __inode_claim_rsv_space(inode, number);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);
@@ -1570,33 +1577,52 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number)
}
EXPORT_SYMBOL(inode_sub_rsv_space);
-static qsize_t inode_get_rsv_space(struct inode *inode)
+static qsize_t __inode_get_rsv_space(struct inode *inode)
{
- qsize_t ret;
-
if (!dqctl(inode->i_sb)->dq_op->get_reserved_space)
return 0;
+ return *inode_reserved_space(inode);
+}
+
+static qsize_t inode_get_rsv_space(struct inode *inode)
+{
+ qsize_t ret;
spin_lock(&inode->i_lock);
- ret = *inode_reserved_space(inode);
+ ret = __inode_get_rsv_space(inode);
spin_unlock(&inode->i_lock);
return ret;
}
-static void inode_incr_space(struct inode *inode, qsize_t number,
+static void __inode_incr_space(struct inode *inode, qsize_t number,
int reserve)
{
if (reserve)
- inode_add_rsv_space(inode, number);
+ *inode_reserved_space(inode) += number;
else
- inode_add_bytes(inode, number);
+ __inode_add_bytes(inode, number);
}
-static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
+static void inode_incr_space(struct inode *inode, qsize_t number,
+ int reserve)
+{
+ spin_lock(&inode->i_lock);
+ __inode_incr_space(inode, number, reserve);
+ spin_unlock(&inode->i_lock);
+}
+
+
+static void __inode_decr_space(struct inode *inode, qsize_t number, int reserve)
{
if (reserve)
- inode_sub_rsv_space(inode, number);
+ *inode_reserved_space(inode) -= number;
else
- inode_sub_bytes(inode, number);
+ __inode_sub_bytes(inode, number);
+}
+static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
+{
+ spin_lock(&inode->i_lock);
+ __inode_decr_space(inode, number, reserve);
+ spin_unlock(&inode->i_lock);
}
/*
diff --git a/fs/stat.c b/fs/stat.c
index 12e90e2..f2da983 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -429,9 +429,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
EXPORT_SYMBOL(inode_add_bytes);
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
- spin_lock(&inode->i_lock);
inode->i_blocks -= bytes >> 9;
bytes &= 511;
if (inode->i_bytes < bytes) {
@@ -439,17 +438,27 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes)
inode->i_bytes += 512;
}
inode->i_bytes -= bytes;
+}
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+ spin_lock(&inode->i_lock);
+ __inode_sub_bytes(inode, bytes);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);
+inline loff_t __inode_get_bytes(struct inode *inode)
+{
+ return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
loff_t inode_get_bytes(struct inode *inode)
{
loff_t ret;
spin_lock(&inode->i_lock);
- ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+ ret = __inode_get_bytes(inode);
spin_unlock(&inode->i_lock);
return ret;
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e87694a..3ef2ec1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2308,7 +2308,9 @@ extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t __inode_get_bytes(struct inode *inode);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 17/19] quota: Some stylistic cleanup for dquot interface
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (15 preceding siblings ...)
2010-10-22 17:35 ` [PATCH 16/19] fs: add unlocked helpers Dmitry Monakhov
@ 2010-10-22 17:35 ` Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 18/19] quota: remove dqptr_sem Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 19/19] quota: redesign dquot reference counting Dmitry Monakhov
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:35 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov, Dmitry Monakhov
From: Dmitry Monakhov <dmonakhov@gmail.com>
This patch performs only stylistic cleanup. No changes in logic at all.
- Rename dqget() to find_get_dquot()
- Wrap the direct dq_count increment in a helper function
Some places still access dq_count directly, but this is because of the
reference counting algorithm. It will be changed in later patches.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/ocfs2/file.c | 8 ++++----
fs/ocfs2/quota_global.c | 2 +-
fs/ocfs2/quota_local.c | 3 ++-
fs/quota/dquot.c | 42 ++++++++++++++++++++++++++----------------
include/linux/quotaops.h | 3 ++-
5 files changed, 35 insertions(+), 23 deletions(-)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9a03c15..b7e7c9b 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1205,8 +1205,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
- transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
- USRQUOTA);
+ transfer_to[USRQUOTA] =
+ find_get_dquot(sb, attr->ia_uid, USRQUOTA);
if (!transfer_to[USRQUOTA]) {
status = -ESRCH;
goto bail_unlock;
@@ -1215,8 +1215,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
- transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
- GRPQUOTA);
+ transfer_to[GRPQUOTA] =
+ find_get_dquot(sb, attr->ia_gid, GRPQUOTA);
if (!transfer_to[GRPQUOTA]) {
status = -ESRCH;
goto bail_unlock;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index b768588..4e00341 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -693,7 +693,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
mutex_lock(&dquot->dq_mutex);
- /* Check whether we are not racing with some other dqget() */
+ /* Check whether we are not racing with some other find_get_dquot() */
if (atomic_read(&dquot->dq_count) > 1)
goto out;
status = ocfs2_lock_global_qf(oinfo, 1);
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 1490cb0..1eaf835 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -500,7 +500,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
}
dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data +
ol_dqblk_block_off(sb, chunk, bit));
- dquot = dqget(sb, le64_to_cpu(dqblk->dqb_id), type);
+ dquot = find_get_dquot(sb, le64_to_cpu(dqblk->dqb_id),
+ type);
if (!dquot) {
status = -EIO;
mlog(ML_ERROR, "Failed to get quota structure "
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 1c7eea1..a7a7670 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -16,7 +16,8 @@
* Revised list management to avoid races
* -- Bill Hawes, <whawes@star.net>, 9/98
*
- * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ * Fixed races in dquot_transfer(), find_get_dquot() and
+ * dquot_alloc_...().
* As the consequence the locking was moved from dquot_decr_...(),
* dquot_incr_...() to calling functions.
* invalidate_dquots() now writes modified dquots.
@@ -108,8 +109,9 @@
* Each dquot has its dq_mutex mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_mutex).
* Currently dquot is locked only when it is being read to memory (or space for
- * it is being allocated) on the first dqget() and when it is being released on
- * the last dqput(). The allocation and release oparations are serialized by
+ * it is being allocated) on the first find_get_dquot() and when it is being
+ * released on the last dqput().
+ * The allocation and release oparations are serialized by
* the dq_mutex and by checking the use count in dquot_release(). Write
* operations on dquots don't hold dq_mutex as they copy data under dq_data_lock
* spinlock to internal buffers before writing.
@@ -526,7 +528,7 @@ int dquot_release(struct dquot *dquot)
struct quota_info *dqopt = sb_dqopts(dquot);
mutex_lock(&dquot->dq_mutex);
- /* Check whether we are not racing with some other dqget() */
+ /* Check whether we are not racing with some other find_get_dquot() */
if (atomic_read(&dquot->dq_count) > 1)
goto out_dqlock;
mutex_lock(&dqopt->dqio_mutex);
@@ -581,7 +583,7 @@ restart:
if (atomic_read(&dquot->dq_count)) {
DEFINE_WAIT(wait);
- atomic_inc(&dquot->dq_count);
+ dqget(dquot);
prepare_to_wait(&dquot->dq_wait_unused, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock(&dqopt->dq_list_lock);
@@ -631,7 +633,7 @@ int dquot_scan_active(struct super_block *sb,
if (dquot->dq_sb != sb)
continue;
/* Now we have active dquot so we can just increase use count */
- atomic_inc(&dquot->dq_count);
+ dqget(dquot);
spin_unlock(&dqopt->dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
@@ -678,7 +680,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
/* Now we have active dquot from which someone is
* holding reference so we can safely just increase
* use count */
- atomic_inc(&dquot->dq_count);
+ dqget(dquot);
spin_unlock(&dqopt->dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
dqctl(sb)->dq_op->write_dquot(dquot);
@@ -873,6 +875,11 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
return dquot;
}
+inline void dqget(struct dquot *dquot)
+{
+ atomic_inc(&dquot->dq_count);
+}
+
/*
* Get reference to dquot
*
@@ -881,7 +888,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
* a) checking for quota flags under dq_list_lock and
* b) getting a reference to dquot before we release dq_list_lock
*/
-struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+struct dquot *find_get_dquot(struct super_block *sb, unsigned int id, int type)
{
struct dquot *dquot = NULL, *empty = NULL;
struct quota_info *dqopt = dqopts(sb);
@@ -917,7 +924,7 @@ we_slept:
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
- atomic_inc(&dquot->dq_count);
+ dqget(dquot);
spin_unlock(&dqopt->dq_list_lock);
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);
@@ -941,7 +948,7 @@ out:
return dquot;
}
-EXPORT_SYMBOL(dqget);
+EXPORT_SYMBOL(find_get_dquot);
static int dqinit_needed(struct inode *inode, int type)
{
@@ -1420,7 +1427,7 @@ static int dquot_active(const struct inode *inode)
* Initialize quota pointers in inode
*
* We do things in a bit complicated way but by that we avoid calling
- * dqget() and thus filesystem callbacks under dqptr_sem.
+ * find_get_dquot() and thus filesystem callbacks under dqptr_sem.
*
* It is better to call this function outside of any transaction as it
* might need a lot of space in journal for dquot structure allocation.
@@ -1451,7 +1458,7 @@ static void __dquot_initialize(struct inode *inode, int type)
id = inode->i_gid;
break;
}
- got[cnt] = dqget(sb, id, cnt);
+ got[cnt] = find_get_dquot(sb, id, cnt);
}
down_write(&dqctl(sb)->dqptr_sem);
@@ -1948,9 +1955,12 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
return 0;
if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
- transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
+ transfer_to[USRQUOTA] = find_get_dquot(sb, iattr->ia_uid,
+ USRQUOTA);
+
if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
- transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
+ transfer_to[GRPQUOTA] = find_get_dquot(sb, iattr->ia_gid,
+ GRPQUOTA);
ret = __dquot_transfer(inode, transfer_to);
dqput_all(transfer_to);
@@ -2526,7 +2536,7 @@ int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
{
struct dquot *dquot;
- dquot = dqget(sb, id, type);
+ dquot = find_get_dquot(sb, id, type);
if (!dquot)
return -ESRCH;
do_get_dqblk(dquot, di);
@@ -2639,7 +2649,7 @@ int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
struct dquot *dquot;
int rc;
- dquot = dqget(sb, id, type);
+ dquot = find_get_dquot(sb, id, type);
if (!dquot) {
rc = -ESRCH;
goto out;
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 6f4cc74..4259da8 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -52,7 +52,8 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
void dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
-struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
+struct dquot*find_get_dquot(struct super_block *sb, unsigned int id, int type);
+void dqget(struct dquot *dquot);
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 18/19] quota: remove dqptr_sem
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (16 preceding siblings ...)
2010-10-22 17:35 ` [PATCH 17/19] quota: Some stylistic cleanup for dquot interface Dmitry Monakhov
@ 2010-10-22 17:35 ` Dmitry Monakhov
2010-10-22 17:35 ` [PATCH 19/19] quota: redesign dquot reference counting Dmitry Monakhov
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:35 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
dqptr_sem is currently one of the most contended locks:
each dquot_initialize and dquot_transfer results in down_write(dqptr_sem).
Let's use inode->i_lock to protect the i_dquot pointers. In that case all
users which modify i_dquot are simply converted to that lock. But users
who held dqptr_sem for reading (charge/uncharge methods) usually
look like the following:
down_read(&dqptr_sem)
___charge_quota()
make_quota_dirty(inode->i_dquot) --> may_sleep
up_read(&dqptr_sem)
We must drop i_lock before make_quota_dirty or flush_warnings.
To protect the dquot from being freed, let's grab an extra reference to the
dquot, and drop it after we are done with the dquot object.
XXX: There is a plan to get rid of extra dqget/dqput calls by protecting
dquot destruction by SRCU.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 145 ++++++++++++++++++++++++++--------------------
fs/super.c | 1 -
include/linux/quota.h | 1 -
include/linux/quotaops.h | 4 +-
4 files changed, 84 insertions(+), 67 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a7a7670..5c8ad82 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -91,18 +91,16 @@
* in inode_add_bytes() and inode_sub_bytes().
*
* The spinlock ordering is hence:
- * dq_data_lock > dq_lock > dq_list_lock > i_lock
+ * dq_data_lock > i_lock > dq_lock > dq_list_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
*
- * Any operation working on dquots via inode pointers must hold dqptr_sem. If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Any operation working on dquots via inode pointers must hold i_lock.
* Special care needs to be taken about S_NOQUOTA inode flag (marking that
* inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under i_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping i_lock. This makes
* sure they cannot race with quotaon which first sets S_NOQUOTA flag and
* then drops all pointers to dquots from an inode.
*
@@ -117,14 +115,8 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_mutex >
+ * i_mutex > dqonoff_sem > journal_lock > dquot->dq_mutex >
* dqio_mutex
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
* i_mutex on quota files is special (it's below dqio_mutex)
*/
@@ -1024,15 +1016,18 @@ static inline int dqput_blocks(struct dquot *dquot)
/*
* Remove references to dquots from inode and add dquot to list for freeing
* if we have the last referece to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
*/
static int remove_inode_dquot_ref(struct inode *inode, int type,
struct list_head *tofree_head)
{
- struct dquot *dquot = inode->i_dquot[type];
+ struct dquot *dquot;
struct quota_info *dqopt = dqopts(inode->i_sb);
+ spin_lock(&inode->i_lock);
+ dquot = inode->i_dquot[type];
inode->i_dquot[type] = NULL;
+ spin_unlock(&inode->i_lock);
+
if (dquot) {
if (dqput_blocks(dquot)) {
#ifdef CONFIG_QUOTA_DEBUG
@@ -1086,7 +1081,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
* We have to scan also I_NEW inodes because they can already
* have quota pointer initialized. Luckily, we need to touch
* only quota pointers and these have separate locking
- * (dqptr_sem).
+ * (i_lock).
*/
if (!IS_NOQUOTA(inode)) {
if (unlikely(inode_get_rsv_space(inode) > 0))
@@ -1110,9 +1105,7 @@ static void drop_dquot_ref(struct super_block *sb, int type)
LIST_HEAD(tofree_head);
if (dqctl(sb)->dq_op) {
- down_write(&dqctl(sb)->dqptr_sem);
remove_dquot_ref(sb, type, &tofree_head);
- up_write(&dqctl(sb)->dqptr_sem);
put_dquot_list(&tofree_head);
}
}
@@ -1426,9 +1419,6 @@ static int dquot_active(const struct inode *inode)
/*
* Initialize quota pointers in inode
*
- * We do things in a bit complicated way but by that we avoid calling
- * find_get_dquot() and thus filesystem callbacks under dqptr_sem.
- *
* It is better to call this function outside of any transaction as it
* might need a lot of space in journal for dquot structure allocation.
*/
@@ -1461,7 +1451,8 @@ static void __dquot_initialize(struct inode *inode, int type)
got[cnt] = find_get_dquot(sb, id, cnt);
}
- down_write(&dqctl(sb)->dqptr_sem);
+ spin_lock(&inode->i_lock);
+ rsv = __inode_get_rsv_space(inode);
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1480,7 +1471,6 @@ static void __dquot_initialize(struct inode *inode, int type)
* Make quota reservation system happy if someone
* did a write before quota was turned on
*/
- rsv = inode_get_rsv_space(inode);
if (unlikely(rsv)) {
spin_lock(&got[cnt]->dq_lock);
dquot_resv_space(got[cnt], rsv);
@@ -1489,7 +1479,7 @@ static void __dquot_initialize(struct inode *inode, int type)
}
}
out_err:
- up_write(&dqctl(sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
/* Drop unused references */
dqput_all(got);
}
@@ -1508,12 +1498,12 @@ static void __dquot_drop(struct inode *inode)
int cnt;
struct dquot *put[MAXQUOTAS];
- down_write(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = inode->i_dquot[cnt];
inode->i_dquot[cnt] = NULL;
}
- up_write(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
dqput_all(put);
}
@@ -1652,6 +1642,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
int warn = flags & DQUOT_SPACE_WARN;
int reserve = flags & DQUOT_SPACE_RESERVE;
int nofail = flags & DQUOT_SPACE_NOFAIL;
+ struct dquot *dquot[MAXQUOTAS] = {};
/*
* First test before acquiring mutex - solves deadlocks when we
@@ -1662,18 +1653,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
goto out;
}
- down_read(&dqctl(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
+ spin_lock(&inode->i_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
+ dquot[cnt] = inode->i_dquot[cnt];
+ dqget(dquot[cnt]);
ret = check_bdq(inode->i_dquot[cnt], number, !warn,
warntype + cnt);
if (ret && !nofail) {
unlock_inode_dquots(inode->i_dquot);
+ spin_unlock(&inode->i_lock);
goto out_flush_warn;
}
}
@@ -1685,15 +1679,14 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
else
dquot_incr_space(inode->i_dquot[cnt], number);
}
- inode_incr_space(inode, number, reserve);
+ __inode_incr_space(inode, number, reserve);
unlock_inode_dquots(inode->i_dquot);
-
- if (reserve)
- goto out_flush_warn;
- mark_all_dquot_dirty(inode->i_dquot);
+ spin_unlock(&inode->i_lock);
+ if (!reserve)
+ mark_all_dquot_dirty(dquot);
out_flush_warn:
- flush_warnings(inode->i_dquot, warntype);
- up_read(&dqctl(inode->i_sb)->dqptr_sem);
+ flush_warnings(dquot, warntype);
+ dqput_all(dquot);
out:
return ret;
}
@@ -1702,10 +1695,11 @@ EXPORT_SYMBOL(__dquot_alloc_space);
/*
* This operation can block, but only after everything is updated
*/
-int dquot_alloc_inode(const struct inode *inode)
+int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0;
char warntype[MAXQUOTAS];
+ struct dquot *dquot[MAXQUOTAS] = {};
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
@@ -1713,11 +1707,14 @@ int dquot_alloc_inode(const struct inode *inode)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&dqctl(inode->i_sb)->dqptr_sem);
+
+ spin_lock(&inode->i_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
+ dquot[cnt] = inode->i_dquot[cnt];
+ dqget(dquot[cnt]);
ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
if (ret)
goto warn_put_all;
@@ -1731,10 +1728,11 @@ int dquot_alloc_inode(const struct inode *inode)
warn_put_all:
unlock_inode_dquots(inode->i_dquot);
+ spin_unlock(&inode->i_lock);
if (ret == 0)
- mark_all_dquot_dirty(inode->i_dquot);
- flush_warnings(inode->i_dquot, warntype);
- up_read(&dqctl(inode->i_sb)->dqptr_sem);
+ mark_all_dquot_dirty(dquot);
+ flush_warnings(dquot, warntype);
+ dqput_all(dquot);
return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1745,25 +1743,30 @@ EXPORT_SYMBOL(dquot_alloc_inode);
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
int cnt;
+ struct dquot *dquot[MAXQUOTAS] = {};
if (!dquot_active(inode)) {
inode_claim_rsv_space(inode, number);
return 0;
}
- down_read(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_lock(&inode->i_lock);
lock_inode_dquots(inode->i_dquot);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt])
+ if (inode->i_dquot[cnt]) {
+ dquot[cnt] = inode->i_dquot[cnt];
+ dqget(dquot[cnt]);
dquot_claim_reserved_space(inode->i_dquot[cnt],
number);
+ }
}
/* Update inode bytes */
- inode_claim_rsv_space(inode, number);
+ __inode_claim_rsv_space(inode, number);
unlock_inode_dquots(inode->i_dquot);
- mark_all_dquot_dirty(inode->i_dquot);
- up_read(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
+ mark_all_dquot_dirty(dquot);
+ dqput_all(dquot);
return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1776,6 +1779,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
unsigned int cnt;
char warntype[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
+ struct dquot *dquot[MAXQUOTAS] = {};
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
@@ -1784,54 +1788,60 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
return;
}
- down_read(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_lock(&inode->i_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
+ dquot[cnt] = inode->i_dquot[cnt];
+ dqget(dquot[cnt]);
warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
if (reserve)
dquot_free_reserved_space(inode->i_dquot[cnt], number);
else
dquot_decr_space(inode->i_dquot[cnt], number);
}
- inode_decr_space(inode, number, reserve);
+ __inode_decr_space(inode, number, reserve);
unlock_inode_dquots(inode->i_dquot);
+ spin_unlock(&inode->i_lock);
- if (reserve)
- goto out_unlock;
- mark_all_dquot_dirty(inode->i_dquot);
-out_unlock:
- flush_warnings(inode->i_dquot, warntype);
- up_read(&dqctl(inode->i_sb)->dqptr_sem);
+ if (!reserve)
+ mark_all_dquot_dirty(dquot);
+ flush_warnings(dquot, warntype);
+ dqput_all(dquot);
}
EXPORT_SYMBOL(__dquot_free_space);
/*
* This operation can block, but only after everything is updated
*/
-void dquot_free_inode(const struct inode *inode)
+void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
+ struct dquot *dquot[MAXQUOTAS] = {};
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (!dquot_active(inode))
return;
- down_read(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_lock(&inode->i_lock);
lock_inode_dquots(inode->i_dquot);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
+ dquot[cnt] = inode->i_dquot[cnt];
+ dqget(dquot[cnt]);
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
dquot_decr_inodes(inode->i_dquot[cnt], 1);
}
unlock_inode_dquots(inode->i_dquot);
- mark_all_dquot_dirty(inode->i_dquot);
- flush_warnings(inode->i_dquot, warntype);
- up_read(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
+
+ mark_all_dquot_dirty(dquot);
+ flush_warnings(dquot, warntype);
+ dqput_all(dquot);
}
EXPORT_SYMBOL(dquot_free_inode);
@@ -1862,9 +1872,10 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype_to[cnt] = QUOTA_NL_NOWARN;
- down_write(&dqctl(inode->i_sb)->dqptr_sem);
+
+ spin_lock(&inode->i_lock);
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
- up_write(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
return 0;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1873,15 +1884,19 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
*/
if (!transfer_to[cnt])
continue;
+ dqget(transfer_to[cnt]);
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(inode->i_sb, cnt))
continue;
is_valid[cnt] = 1;
transfer_from[cnt] = inode->i_dquot[cnt];
+ if (transfer_from[cnt])
+ dqget(transfer_from[cnt]);
+
}
lock_dquot_double(transfer_from, transfer_to);
- cur_space = inode_get_bytes(inode);
- rsv_space = inode_get_rsv_space(inode);
+ cur_space = __inode_get_bytes(inode);
+ rsv_space = __inode_get_rsv_space(inode);
space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1921,13 +1936,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
}
unlock_inode_dquots(transfer_to);
unlock_inode_dquots(transfer_from);
- up_write(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
flush_warnings(transfer_to, warntype_to);
flush_warnings(transfer_from, warntype_from_inodes);
flush_warnings(transfer_from, warntype_from_space);
+ dqput_all(transfer_to);
+ dqput_all(transfer_from);
/* Pass back references to put */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (is_valid[cnt])
@@ -1936,8 +1953,10 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
over_quota:
unlock_inode_dquots(transfer_to);
unlock_inode_dquots(transfer_from);
- up_write(&dqctl(inode->i_sb)->dqptr_sem);
+ spin_unlock(&inode->i_lock);
flush_warnings(transfer_to, warntype_to);
+ dqput_all(transfer_to);
+ dqput_all(transfer_from);
return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
diff --git a/fs/super.c b/fs/super.c
index 9eea8e9..2f2090c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -104,7 +104,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
mutex_init(&s->s_vfs_rename_mutex);
lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
mutex_init(&s->s_dquot.dqonoff_mutex);
- init_rwsem(&s->s_dquot.dqptr_sem);
init_waitqueue_head(&s->s_wait_unfrozen);
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 867848b..834ed1b 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -409,7 +409,6 @@ struct quota_ctl_info {
unsigned int flags; /* Flags for diskquotas on this device */
struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
- struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
const struct quotactl_ops *qcop;
const struct dquot_operations *dq_op;
struct quota_info *dq_opt;
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 4259da8..2675ff7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot);
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
-int dquot_alloc_inode(const struct inode *inode);
+int dquot_alloc_inode(struct inode *inode);
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
-void dquot_free_inode(const struct inode *inode);
+void dquot_free_inode(struct inode *inode);
int dquot_disable(struct super_block *sb, int type, unsigned int flags);
/* Suspend quotas on remount RO */
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread
* [PATCH 19/19] quota: redesign dquot reference counting
2010-10-22 17:34 [PATCH 00/19] quota: RFC SMP improvements for generic quota V2 Dmitry Monakhov
` (17 preceding siblings ...)
2010-10-22 17:35 ` [PATCH 18/19] quota: remove dqptr_sem Dmitry Monakhov
@ 2010-10-22 17:35 ` Dmitry Monakhov
18 siblings, 0 replies; 26+ messages in thread
From: Dmitry Monakhov @ 2010-10-22 17:35 UTC (permalink / raw)
To: linux-fsdevel; +Cc: jack, hch, Dmitry Monakhov
Currently each find_get_dquot() goes through dq_mutex regardless of the
dq_count value. This just kills system performance.
With help of small modifications we can avoid locking on the mutex
on fast path.
XXX: cmpxchg is not the fastest operation in the world. Right now I can't
measure any overhead from it. If it becomes a problem we can easily switch
from atomic operations to a counter protected by dq_lock.
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
---
fs/quota/dquot.c | 117 +++++++++++++++++++++++++++++++++++-------------
include/linux/quota.h | 3 +-
2 files changed, 87 insertions(+), 33 deletions(-)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5c8ad82..a5577fd 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -233,6 +233,32 @@ static void put_quota_format(struct quota_format_type *fmt)
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
+/**
+ * atomic_add_if_greater - add if the value is greater than a bound
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to @v...
+ * @u: ...if @v is greater than @u.
+ *
+ * Atomically adds @a to @v, so long as @v is greater than @u.
+ * Returns non-zero if @v was greater than @u, and zero otherwise.
+ *
+ * cmpxchg is not the fastest operation in the world, but it is still
+ * better than spin_lock in our case.
+ */
+static inline int atomic_add_if_greater(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c <= (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c > (u);
+}
+
static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);
@@ -313,13 +339,6 @@ static inline void remove_inuse(struct dquot *dquot)
/*
* End of list functions needing dq_list_lock
*/
-
-static void wait_on_dquot(struct dquot *dquot)
-{
- mutex_lock(&dquot->dq_mutex);
- mutex_unlock(&dquot->dq_mutex);
-}
-
static inline int dquot_dirty(struct dquot *dquot)
{
return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -738,12 +757,16 @@ static void prune_one_sb_dqcache(struct super_block *sb, void *arg)
head = dqopt->dq_free_list.prev;
while (head != &dqopt->dq_free_list && count) {
dquot = list_entry(head, struct dquot, dq_free);
+ head = dqopt->dq_free_list.prev;
+ /* If someone is waiting for this dquot to become active,
+ skip it. */
+ if (test_bit(DQ_WAIT_B, &dquot->dq_flags))
+ continue;
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
count--;
- head = dqopt->dq_free_list.prev;
}
spin_unlock(&dqopt->dq_list_lock);
mutex_unlock(&dqctl(sb)->dqonoff_mutex);
@@ -782,6 +805,9 @@ void dqput(struct dquot *dquot)
if (!dquot)
return;
+ dqopt = sb_dqopts(dquot);
+ dqstats_inc(DQST_DROPS);
+we_slept:
#ifdef CONFIG_QUOTA_DEBUG
if (!atomic_read(&dquot->dq_count)) {
quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
@@ -789,16 +815,18 @@ void dqput(struct dquot *dquot)
BUG();
}
#endif
- dqopt = sb_dqopts(dquot);
- dqstats_inc(DQST_DROPS);
-we_slept:
+
+ /* If count is greater than 2 we don't have to grab any locks */
+ if (atomic_add_if_greater(&dquot->dq_count, -1, 2)) {
+ /* fastpath, nothing to be done there */
+ return;
+ }
spin_lock(&dqopt->dq_list_lock);
- if (atomic_read(&dquot->dq_count) > 1) {
+ if (atomic_add_unless(&dquot->dq_count, -1, 1)) {
/* We have more than one user... nothing to do */
- atomic_dec(&dquot->dq_count);
/* Releasing dquot during quotaoff phase? */
- if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
- atomic_read(&dquot->dq_count) == 1)
+ if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type)
+ && atomic_read(&dquot->dq_count) == 1)
wake_up(&dquot->dq_wait_unused);
spin_unlock(&dqopt->dq_list_lock);
return;
@@ -872,6 +900,34 @@ inline void dqget(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
}
+static int dqget_stable(struct dquot *dquot)
+{
+ if (atomic_add_if_greater(&dquot->dq_count, 1, 1))
+ /* Reference was successfully incremented */
+ return 1;
+ /*
+ * dquot is in an unstable state. In order to serialize with
+ * dquot_release() we have to wait on ->dq_mutex, but the object may
+ * belong to the free list and may be pruned from the cache at any
+ * moment after we drop list_lock.
+ * To protect the dquot from that we set the WAIT bit.
+ */
+ set_bit(DQ_WAIT_B, &dquot->dq_flags);
+ spin_unlock(&sb_dqopts(dquot)->dq_list_lock);
+ /*
+ * Increment the count under the mutex, to serialize with
+ * dquot_release(). Afterwards we are protected from later
+ * release attempts.
+ */
+ mutex_lock(&dquot->dq_mutex);
+ dqget(dquot);
+ mutex_unlock(&dquot->dq_mutex);
+
+ spin_lock(&sb_dqopts(dquot)->dq_list_lock);
+ remove_free_dquot(dquot);
+ clear_bit(DQ_WAIT_B, &dquot->dq_flags);
+ return 0;
+}
+
/*
* Get reference to dquot
*
@@ -896,7 +952,12 @@ we_slept:
rcu_read_unlock();
dquot = find_dquot(sb, id, type);
- if (!dquot) {
+ if (dquot) {
+ dqget_stable(dquot);
+ spin_unlock(&dqopt->dq_list_lock);
+ dqstats_inc(DQST_CACHE_HITS);
+ goto found;
+ } else {
if (!empty) {
spin_unlock(&dqopt->dq_list_lock);
empty = get_empty_dquot(sb, type);
@@ -912,24 +973,16 @@ we_slept:
/* hash it first so it can be found */
insert_dquot_hash(dquot);
spin_unlock(&dqopt->dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
- } else {
- if (!atomic_read(&dquot->dq_count))
- remove_free_dquot(dquot);
- dqget(dquot);
- spin_unlock(&dqopt->dq_list_lock);
- dqstats_inc(DQST_CACHE_HITS);
- dqstats_inc(DQST_LOOKUPS);
}
- /* Wait for dq_mutex - after this we know that either dquot_release() is
- * already finished or it will be canceled due to dq_count > 1 test */
- wait_on_dquot(dquot);
+found:
+ dqstats_inc(DQST_LOOKUPS);
/* Read the dquot / allocate space in quota file */
- if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
- dqctl(sb)->dq_op->acquire_dquot(dquot) < 0) {
- dqput(dquot);
- dquot = NULL;
- goto out;
+ if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ if (dqctl(sb)->dq_op->acquire_dquot(dquot) < 0) {
+ dqput(dquot);
+ dquot = NULL;
+ goto out;
+ }
}
#ifdef CONFIG_QUOTA_DEBUG
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 834ed1b..2260fa3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -276,7 +276,8 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_WAIT_B 6 /* Do not prune this dquot from free list */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
--
1.6.5.2
^ permalink raw reply related [flat|nested] 26+ messages in thread