* [PATCH 01/34] md: fix required/prohibited spaces [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 02/34] md: fix 'foo*' and 'foo * bar' [ERROR] heinzm
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Fix required and prohibited spaces flagged by checkpatch.pl.
While at it, replace comparisons to 0 (i.e. "== 0") with negation,
as recommended by "checkpatch.pl --strict ...".
Signed-off-by: heinzm <heinzm@redhat.com>
---
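As a quick illustration of the two checkpatch classes addressed here, a minimal
stand-alone sketch in the target style (hypothetical code, not taken from the
diff below):

#include <string.h>

/*
 * Hypothetical example: spaces around '=' and '<', a space after each ';'
 * in the for-loop header, and "!strcmp(...)" instead of "strcmp(...) == 0".
 */
static int find_word(const char **list, const char *word, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(list[i], word))
			return i;

	return -1;
}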
drivers/md/md-autodetect.c | 10 ++--
drivers/md/md-bitmap.c | 15 +++---
drivers/md/md-bitmap.h | 2 +-
drivers/md/md-cluster.c | 2 +-
drivers/md/md-faulty.c | 22 ++++----
drivers/md/md-linear.c | 2 +-
drivers/md/md-multipath.c | 9 ++--
drivers/md/md.c | 106 ++++++++++++++++++-------------------
drivers/md/md.h | 2 +-
drivers/md/raid0.c | 10 ++--
drivers/md/raid1.c | 20 +++----
drivers/md/raid10.c | 28 +++++-----
drivers/md/raid5.c | 40 +++++++-------
include/linux/raid/pq.h | 2 +-
14 files changed, 132 insertions(+), 138 deletions(-)
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 91836e6de326..46090cdd02ba 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -23,7 +23,7 @@
#ifdef CONFIG_MD_AUTODETECT
static int __initdata raid_noautodetect;
#else
-static int __initdata raid_noautodetect=1;
+static int __initdata raid_noautodetect = 1;
#endif
static int __initdata raid_autopart;
@@ -73,7 +73,7 @@ static int __init md_setup(char *str)
return 0;
}
str1 = str;
- for (ent=0 ; ent< md_setup_ents ; ent++)
+ for (ent = 0; ent < md_setup_ents; ent++)
if (md_setup_args[ent].minor == minor &&
md_setup_args[ent].partitioned == partitioned) {
printk(KERN_WARNING "md: md=%s%d, Specified more than once. "
@@ -108,7 +108,7 @@ static int __init md_setup(char *str)
fallthrough;
case 0:
md_setup_args[ent].level = LEVEL_NONE;
- pername="super-block";
+ pername = "super-block";
}
printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n",
@@ -243,9 +243,9 @@ static int __init raid_setup(char *str)
raid_noautodetect = 1;
if (!strncmp(str, "autodetect", wlen))
raid_noautodetect = 0;
- if (strncmp(str, "partitionable", wlen)==0)
+ if (!strncmp(str, "partitionable", wlen))
raid_autopart = 1;
- if (strncmp(str, "part", wlen)==0)
+ if (!strncmp(str, "part", wlen))
raid_autopart = 1;
pos += wlen+1;
}
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index e7cc6ba1b657..f2192aa8b826 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -156,7 +156,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
sector_t target;
rdev_for_each(rdev, mddev) {
- if (! test_bit(In_sync, &rdev->flags)
+ if (!test_bit(In_sync, &rdev->flags)
|| test_bit(Faulty, &rdev->flags)
|| test_bit(Bitmap_sync, &rdev->flags))
continue;
@@ -307,8 +307,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
}
if (wait)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
+ wait_event(bitmap->write_wait, !atomic_read(&bitmap->pending_writes));
}
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
md_bitmap_file_kick(bitmap);
@@ -401,8 +400,8 @@ static int read_page(struct file *file, unsigned long index,
}
page->index = index;
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
+ wait_event(bitmap->write_wait, !atomic_read(&bitmap->pending_writes));
+
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
ret = -EIO;
out:
@@ -426,8 +425,7 @@ static int read_page(struct file *file, unsigned long index,
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
if (bitmap->storage.file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
+ wait_event(bitmap->write_wait, !atomic_read(&bitmap->pending_writes));
else
/* Note that we ignore the return value. The writes
* might have failed, but that would just mean that
@@ -2629,8 +2627,7 @@ behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
}
static struct md_sysfs_entry max_backlog_used =
-__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
- behind_writes_used_show, behind_writes_used_reset);
+__ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset);
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index cfd7395de8fd..0433a0a96c95 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -116,7 +116,7 @@ typedef __u16 bitmap_counter_t;
enum bitmap_state {
BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
BITMAP_WRITE_ERROR = 2, /* A write error has occurred */
- BITMAP_HOSTENDIAN =15,
+ BITMAP_HOSTENDIAN = 15,
};
/* the superblock at the front of the bitmap file -- little endian */
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 10e0c5381d01..9bcf816b80a1 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -55,7 +55,7 @@ struct resync_info {
* set up all the related infos such as bitmap and personality */
#define MD_CLUSTER_ALREADY_IN_CLUSTER 6
#define MD_CLUSTER_PENDING_RECV_EVENT 7
-#define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8
+#define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8
struct md_cluster_info {
struct mddev *mddev; /* the md device which md_cluster_info belongs to */
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 50ad818978a4..b228447e1f88 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -101,7 +101,7 @@ static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end,
{
/* If we find a ReadFixable sector, we fix it ... */
int i;
- for (i=0; i<conf->nfaults; i++)
+ for (i = 0; i < conf->nfaults; i++)
if (conf->faults[i] >= start &&
conf->faults[i] < end) {
/* found it ... */
@@ -125,9 +125,9 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
int i;
int n = conf->nfaults;
- for (i=0; i<conf->nfaults; i++)
+ for (i = 0; i < conf->nfaults; i++)
if (conf->faults[i] == start) {
- switch(mode) {
+ switch (mode) {
case NoPersist: conf->modes[i] = mode; return;
case WritePersistent:
if (conf->modes[i] == ReadPersistent ||
@@ -223,28 +223,28 @@ static void faulty_status(struct seq_file *seq, struct mddev *mddev)
struct faulty_conf *conf = mddev->private;
int n;
- if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
+ if ((n = atomic_read(&conf->counters[WriteTransient])))
seq_printf(seq, " WriteTransient=%d(%d)",
n, conf->period[WriteTransient]);
- if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
+ if ((n = atomic_read(&conf->counters[ReadTransient])))
seq_printf(seq, " ReadTransient=%d(%d)",
n, conf->period[ReadTransient]);
- if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
+ if ((n = atomic_read(&conf->counters[WritePersistent])))
seq_printf(seq, " WritePersistent=%d(%d)",
n, conf->period[WritePersistent]);
- if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
+ if ((n = atomic_read(&conf->counters[ReadPersistent])))
seq_printf(seq, " ReadPersistent=%d(%d)",
n, conf->period[ReadPersistent]);
- if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
+ if ((n = atomic_read(&conf->counters[ReadFixable])))
seq_printf(seq, " ReadFixable=%d(%d)",
n, conf->period[ReadFixable]);
- if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
+ if ((n = atomic_read(&conf->counters[WriteAll])))
seq_printf(seq, " WriteAll");
seq_printf(seq, " nfaults=%d", conf->nfaults);
@@ -265,7 +265,7 @@ static int faulty_reshape(struct mddev *mddev)
conf->nfaults = 0;
else if (mode == ClearErrors) {
int i;
- for (i=0 ; i < Modes ; i++) {
+ for (i = 0; i < Modes ; i++) {
conf->period[i] = 0;
atomic_set(&conf->counters[i], 0);
}
@@ -304,7 +304,7 @@ static int faulty_run(struct mddev *mddev)
if (!conf)
return -ENOMEM;
- for (i=0; i<Modes; i++) {
+ for (i = 0; i < Modes; i++) {
atomic_set(&conf->counters[i], 0);
conf->period[i] = 0;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 6e7797b4e738..c0ad603f37a6 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -172,7 +172,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = rdev->saved_raid_disk;
rdev->saved_raid_disk = -1;
- newconf = linear_conf(mddev,mddev->raid_disks+1);
+ newconf = linear_conf(mddev, mddev->raid_disks+1);
if (!newconf)
return -ENOMEM;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 66edf5e72bd6..d772143060bb 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -198,8 +198,7 @@ static void print_multipath_conf (struct mpconf *conf)
tmp = conf->multipaths + i;
if (tmp->rdev)
pr_debug(" disk%d, o:%d, dev:%pg\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- tmp->rdev->bdev);
+ i, !test_bit(Faulty, &tmp->rdev->flags), tmp->rdev->bdev);
}
}
@@ -218,7 +217,7 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
print_multipath_conf(conf);
for (path = first; path <= last; path++)
- if ((p=conf->multipaths+path)->rdev == NULL) {
+ if (!(p = conf->multipaths+path)->rdev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -303,7 +302,7 @@ static void multipathd(struct md_thread *thread)
bio = &mp_bh->bio;
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
- if ((mp_bh->path = multipath_map (conf))<0) {
+ if ((mp_bh->path = multipath_map(conf)) < 0) {
pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
@@ -447,7 +446,7 @@ static struct md_personality multipath_personality =
.status = multipath_status,
.error_handler = multipath_error,
.hot_add_disk = multipath_add_disk,
- .hot_remove_disk= multipath_remove_disk,
+ .hot_remove_disk = multipath_remove_disk,
.size = multipath_size,
};
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 927a43db5dfb..15be41044d32 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -871,7 +871,7 @@ static struct md_personality *find_pers(int level, char *clevel)
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
- if (strcmp(pers->name, clevel)==0)
+ if (!strcmp(pers->name, clevel))
return pers;
}
return NULL;
@@ -974,7 +974,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
int md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
- wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+ wait_event(mddev->sb_wait, !atomic_read(&mddev->pending_writes));
if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
return -EAGAIN;
return 0;
@@ -1036,8 +1036,8 @@ static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
int ret;
mdp_super_t *tmp1, *tmp2;
- tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
- tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
+ tmp1 = kmalloc(sizeof(*tmp1), GFP_KERNEL);
+ tmp2 = kmalloc(sizeof(*tmp2), GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
@@ -1335,7 +1335,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
- memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
+ memcpy(mddev->uuid+12, &sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
@@ -1417,7 +1417,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
* been initialised or not.
*/
int i;
- int active=0, working=0,failed=0,spare=0,nr_disks=0;
+ int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;
rdev->sb_size = MD_SB_BYTES;
@@ -1432,7 +1432,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
- memcpy(&sb->set_uuid3, mddev->uuid+12,4);
+ memcpy(&sb->set_uuid3, mddev->uuid+12, 4);
sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
sb->level = mddev->level;
@@ -1521,7 +1521,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
d->state |= (1<<MD_DISK_FAILFAST);
}
/* now set the "removed" and "faulty" bits on any missing devices */
- for (i=0 ; i < mddev->raid_disks ; i++) {
+ for (i = 0; i < mddev->raid_disks; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
@@ -1616,7 +1616,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
* 1: At start of device
* 2: 4K from start of device.
*/
- switch(minor_version) {
+ switch (minor_version) {
case 0:
sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
sb_start &= ~(sector_t)(4*2-1);
@@ -1908,7 +1908,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
- switch(role) {
+
+ switch (role) {
case MD_DISK_ROLE_SPARE: /* spare */
break;
case MD_DISK_ROLE_FAULTY: /* faulty */
@@ -2043,7 +2044,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
if (rdev->badblocks.count == 0)
- /* Nothing to do for bad blocks*/ ;
+ /* Nothing to do for bad blocks*/;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
@@ -2091,7 +2092,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
} else
max_dev = le32_to_cpu(sb->max_dev);
- for (i=0; i<max_dev;i++)
+ for (i = 0; i < max_dev; i++)
sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
@@ -2720,7 +2721,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
- mddev->events ++;
+ mddev->events++;
mddev->can_decrease_events = nospares;
}
@@ -2753,7 +2754,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
- md_super_write(mddev,rdev,
+ md_super_write(mddev, rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
pr_debug("md: (write) %pg's sb offset: %llu\n",
@@ -3122,7 +3123,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (test_bit(Journal, &rdev->flags))
return -EBUSY;
- if (strncmp(buf, "none", 4)==0)
+ if (!strncmp(buf, "none", 4))
slot = -1;
else {
err = kstrtouint(buf, 10, (unsigned int *)&slot);
@@ -3242,7 +3243,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
return -EINVAL;
if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
@@ -3841,7 +3842,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
return len;
}
static struct md_sysfs_entry md_safe_delay =
-__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
+__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
@@ -4315,7 +4316,7 @@ static char *array_states[] = {
static int match_word(const char *word, char **list)
{
int n;
- for (n=0; list[n]; n++)
+ for (n = 0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
@@ -4327,7 +4328,7 @@ array_state_show(struct mddev *mddev, char *page)
enum array_state st = inactive;
if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
- switch(mddev->ro) {
+ switch (mddev->ro) {
case MD_RDONLY:
st = readonly;
break;
@@ -4395,7 +4396,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
err = -EINVAL;
- switch(st) {
+ switch (st) {
case bad_word:
break;
case clear:
@@ -4725,11 +4726,11 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
}
major = simple_strtoul(buf, &e, 10);
err = -EINVAL;
- if (e==buf || *e != '.')
+ if (e == buf || *e != '.')
goto out_unlock;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
- if (e==buf || (*e && *e != '\n') )
+ if (e == buf || (*e && *e != '\n'))
goto out_unlock;
err = -ENOENT;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
@@ -4879,7 +4880,7 @@ static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
- mddev->sync_speed_min ? "local": "system");
+ mddev->sync_speed_min ? "local" : "system");
}
static ssize_t
@@ -4888,7 +4889,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int min;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (!strncmp(buf, "system", 6)) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
@@ -4908,7 +4909,7 @@ static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
- mddev->sync_speed_max ? "local": "system");
+ mddev->sync_speed_max ? "local" : "system");
}
static ssize_t
@@ -4917,7 +4918,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int max;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (!strncmp(buf, "system", 6)) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
@@ -6303,7 +6304,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- pr_warn("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n", mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -6365,7 +6366,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mddev->sysfs_active ||
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- pr_warn("md: %s still in use.\n",mdname(mddev));
+ pr_warn("md: %s still in use.\n", mdname(mddev));
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -6549,7 +6550,7 @@ static int get_version(void __user *arg)
static int get_array_info(struct mddev *mddev, void __user *arg)
{
mdu_array_info_t info;
- int nr,working,insync,failed,spare;
+ int nr, working, insync, failed, spare;
struct md_rdev *rdev;
nr = working = insync = failed = spare = 0;
@@ -6582,7 +6583,7 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
- info.not_persistent= !mddev->persistent;
+ info.not_persistent = !mddev->persistent;
info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
info.state = 0;
@@ -6681,7 +6682,7 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
{
struct md_rdev *rdev;
- dev_t dev = MKDEV(info->major,info->minor);
+ dev_t dev = MKDEV(info->major, info->minor);
if (mddev_is_clustered(mddev) &&
!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
@@ -7140,7 +7141,7 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
- mddev->persistent = ! info->not_persistent;
+ mddev->persistent = !info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
@@ -7729,7 +7730,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
mddev->hold_active = 0;
mddev_unlock(mddev);
out:
- if(did_set_md_closing)
+ if (did_set_md_closing)
clear_bit(MD_CLOSING, &mddev->flags);
return err;
}
@@ -8078,7 +8079,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
*/
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
- while ( max_sectors/2 > (1ULL<<(scale+32)))
+ while (max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
@@ -8096,9 +8097,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
- (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
+ (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
"reshape" :
- (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
+ (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) ?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
@@ -8162,7 +8163,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
return (void*)1;
spin_lock(&all_mddevs_lock);
- list_for_each(tmp,&all_mddevs)
+ list_for_each(tmp, &all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
if (!mddev_get(mddev))
@@ -8290,7 +8291,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
- seq_printf(seq," super %d.%d",
+ seq_printf(seq, " super %d.%d",
mddev->major_version,
mddev->minor_version);
}
@@ -8708,11 +8709,11 @@ void md_do_sync(struct md_thread *thread)
struct mddev *mddev = thread->mddev;
struct mddev *mddev2;
unsigned int currspeed = 0, window;
- sector_t max_sectors,j, io_sectors, recovery_done;
+ sector_t max_sectors, j, io_sectors, recovery_done;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
- int last_mark,m;
+ int last_mark, m;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
@@ -8988,7 +8989,7 @@ void md_do_sync(struct md_thread *thread)
last_check = io_sectors;
repeat:
- if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
+ if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
@@ -9031,7 +9032,7 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
+ pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
test_bit(MD_RECOVERY_INTR, &mddev->recovery)
? "interrupted" : "done");
/*
@@ -9139,7 +9140,7 @@ static int remove_and_add_spares(struct mddev *mddev,
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
test_bit(Faulty, &rdev->flags) &&
- atomic_read(&rdev->nr_pending)==0) {
+ !atomic_read(&rdev->nr_pending)) {
/* Faulty non-Blocked devices with nr_pending == 0
* never get nr_pending incremented,
* never get Faulty cleared, and never get Blocked set.
@@ -9159,7 +9160,7 @@ static int remove_and_add_spares(struct mddev *mddev,
((test_bit(RemoveSynchronized, &rdev->flags) ||
(!test_bit(In_sync, &rdev->flags) &&
!test_bit(Journal, &rdev->flags))) &&
- atomic_read(&rdev->nr_pending)==0)) {
+ !atomic_read(&rdev->nr_pending))) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
@@ -9296,14 +9297,11 @@ void md_check_recovery(struct mddev *mddev)
if (!md_is_rdwr(mddev) &&
!test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
- if ( ! (
- (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
- test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- (mddev->external == 0 && mddev->safemode == 1) ||
- (mddev->safemode == 2
- && !mddev->in_sync && mddev->recovery_cp == MaxSector)
- ))
+ if (!((mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ (mddev->external == 0 && mddev->safemode == 1) ||
+ (mddev->safemode == 2 && !mddev->in_sync && mddev->recovery_cp == MaxSector)))
return;
if (mddev_trylock(mddev)) {
@@ -9894,7 +9892,7 @@ void md_autostart_arrays(int part)
dev = node_detected_dev->dev;
kfree(node_detected_dev);
mutex_unlock(&detected_devices_mutex);
- rdev = md_import_device(dev,0, 90);
+ rdev = md_import_device(dev, 0, 90);
mutex_lock(&detected_devices_mutex);
if (IS_ERR(rdev))
continue;
@@ -9920,7 +9918,7 @@ static __exit void md_exit(void)
struct mddev *mddev, *n;
int delay = 1;
- unregister_blkdev(MD_MAJOR,"md");
+ unregister_blkdev(MD_MAJOR, "md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e148e3c83b0d..39df217b51be 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -652,7 +652,7 @@ static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
sysfs_notify_dirent(sd);
}
-static inline char * mdname (struct mddev * mddev)
+static inline char * mdname(struct mddev * mddev)
{
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b536befd8898..6129ab4d4708 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -41,7 +41,7 @@ static void dump_zones(struct mddev *mddev)
int raid_disks = conf->strip_zone[0].nb_dev;
pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
mdname(mddev),
- conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+ conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
for (j = 0; j < conf->nr_strip_zones; j++) {
char line[200];
int len = 0;
@@ -218,7 +218,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
smallest = NULL;
c = 0;
- for (j=0; j<cnt; j++) {
+ for (j = 0; j < cnt; j++) {
rdev = conf->devlist[j];
if (rdev->sectors <= zone->dev_start) {
pr_debug("md/raid0:%s: checking %pg ... nope\n",
@@ -323,7 +323,7 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
chunk = *sector_offset;
/* quotient is the chunk in real device*/
sector_div(chunk, zone->nb_dev << chunksect_bits);
- } else{
+ } else {
sect_in_chunk = sector_div(sector, chunk_sects);
chunk = *sector_offset;
sector_div(chunk, chunk_sects * zone->nb_dev);
@@ -660,7 +660,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->new_level = 0;
mddev->new_layout = 0;
mddev->new_chunk_sectors = mddev->chunk_sectors;
- mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->delta_disks = -mddev->raid_disks / 2;
mddev->raid_disks += mddev->delta_disks;
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
@@ -755,7 +755,7 @@ static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
-static struct md_personality raid0_personality=
+static struct md_personality raid0_personality =
{
.name = "raid0",
.level = 0,
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 68a9e2d9985b..884983c89491 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1616,7 +1616,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
if (bio_data_dir(bio) == READ)
raid1_read_request(mddev, bio, sectors, NULL);
else {
- if (!md_write_start(mddev,bio))
+ if (!md_write_start(mddev, bio))
return false;
raid1_write_request(mddev, bio, sectors);
}
@@ -2040,7 +2040,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
bio->bi_end_io = end_sync_write;
}
- while(sectors) {
+ while (sectors) {
int s = sectors;
int d = r1_bio->read_disk;
int success = 0;
@@ -2130,7 +2130,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
}
sectors -= s;
sect += s;
- idx ++;
+ idx++;
}
set_bit(R1BIO_Uptodate, &r1_bio->state);
bio->bi_status = 0;
@@ -2280,7 +2280,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
sector_t sect, int sectors)
{
struct mddev *mddev = conf->mddev;
- while(sectors) {
+ while (sectors) {
int s = sectors;
int d = read_disk;
int success = 0;
@@ -2327,7 +2327,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
/* write it back and re-read */
start = d;
while (d != read_disk) {
- if (d==0)
+ if (!d)
d = conf->raid_disks * 2;
d--;
rcu_read_lock();
@@ -2344,7 +2344,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
}
d = start;
while (d != read_disk) {
- if (d==0)
+ if (!d)
d = conf->raid_disks * 2;
d--;
rcu_read_lock();
@@ -2772,7 +2772,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_opf = REQ_OP_WRITE;
bio->bi_end_io = end_sync_write;
- write_targets ++;
+ write_targets++;
} else {
/* may need to read from here */
sector_t first_bad = MaxSector;
@@ -3282,8 +3282,8 @@ static int raid1_reshape(struct mddev *mddev)
raid_disks = mddev->raid_disks + mddev->delta_disks;
if (raid_disks < conf->raid_disks) {
- cnt=0;
- for (d= 0; d < conf->raid_disks; d++)
+ cnt = 0;
+ for (d = 0; d < conf->raid_disks; d++)
if (conf->mirrors[d].rdev)
cnt++;
if (cnt > raid_disks)
@@ -3394,7 +3394,7 @@ static struct md_personality raid1_personality =
.status = raid1_status,
.error_handler = raid1_error,
.hot_add_disk = raid1_add_disk,
- .hot_remove_disk= raid1_remove_disk,
+ .hot_remove_disk = raid1_remove_disk,
.spare_active = raid1_spare_active,
.sync_request = raid1_sync_request,
.resize = raid1_resize,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6c66357f92f5..3a5946fa2b90 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -261,7 +261,7 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
int i;
for (i = 0; i < conf->geo.raid_disks; i++) {
- struct bio **bio = & r10_bio->devs[i].bio;
+ struct bio **bio = &r10_bio->devs[i].bio;
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
@@ -303,7 +303,7 @@ static void reschedule_retry(struct r10bio *r10_bio)
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r10_bio->retry_list, &conf->retry_list);
- conf->nr_queued ++;
+ conf->nr_queued++;
spin_unlock_irqrestore(&conf->device_lock, flags);
/* wake up frozen array... */
@@ -588,7 +588,7 @@ static void raid10_end_write_request(struct bio *bio)
static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
- int n,f;
+ int n, f;
sector_t sector;
sector_t chunk;
sector_t stripe;
@@ -2402,7 +2402,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_set(&r10_bio->remaining, 1);
/* find the first device with a block */
- for (i=0; i<conf->copies; i++)
+ for (i = 0; i < conf->copies; i++)
if (!r10_bio->devs[i].bio->bi_status)
break;
@@ -2417,7 +2417,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
/* now find blocks with errors */
- for (i=0 ; i < conf->copies ; i++) {
+ for (i = 0; i < conf->copies; i++) {
int j, d;
struct md_rdev *rdev;
struct resync_pages *rp;
@@ -2742,7 +2742,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
return;
}
- while(sectors) {
+ while (sectors) {
int s = sectors;
int sl = r10_bio->read_slot;
int success = 0;
@@ -2806,7 +2806,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
/* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) {
- if (sl==0)
+ if (!sl)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
@@ -2840,7 +2840,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
sl = start;
while (sl != r10_bio->read_slot) {
- if (sl==0)
+ if (!sl)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
@@ -3511,7 +3511,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
&sync_blocks, still_degraded);
any_working = 0;
- for (j=0; j<conf->copies;j++) {
+ for (j = 0; j < conf->copies; j++) {
int k;
int d = r10_bio->devs[j].devnum;
sector_t from_addr, to_addr;
@@ -3552,7 +3552,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
- for (k=0; k<conf->copies; k++)
+ for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
BUG_ON(k == conf->copies);
@@ -3774,7 +3774,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (count < 2) {
- for (i=0; i<conf->copies; i++) {
+ for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
if (r10_bio->devs[i].bio->bi_end_io)
rdev_dec_pending(conf->mirrors[d].rdev,
@@ -3801,7 +3801,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
len = (max_sector - sector_nr) << 9;
if (len == 0)
break;
- for (bio= biolist ; bio ; bio=bio->bi_next) {
+ for (bio = biolist; bio; bio = bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
page = resync_fetch_page(rp, page_idx);
/*
@@ -3892,7 +3892,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
max_sector = sector_nr + max_sync;
sectors_skipped += (max_sector - sector_nr);
- chunks_skipped ++;
+ chunks_skipped++;
sector_nr = max_sector;
goto skipped;
}
@@ -5257,7 +5257,7 @@ static struct md_personality raid10_personality =
.status = raid10_status,
.error_handler = raid10_error,
.hot_add_disk = raid10_add_disk,
- .hot_remove_disk= raid10_remove_disk,
+ .hot_remove_disk = raid10_remove_disk,
.spare_active = raid10_spare_active,
.sync_request = raid10_sync_request,
.quiesce = raid10_quiesce,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7b820b81d8c2..a4351ff3fe31 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -225,7 +225,7 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
int injournal = 0; /* number of date pages with R5_InJournal */
BUG_ON(!list_empty(&sh->lru));
- BUG_ON(atomic_read(&conf->active_stripes)==0);
+ BUG_ON(!atomic_read(&conf->active_stripes));
if (r5c_is_writeback(conf->log))
for (i = sh->disks; i--; )
@@ -2606,7 +2606,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
osh->pages[i] = NULL;
}
#endif
- for(i=0; i<conf->pool_size; i++) {
+ for (i = 0; i < conf->pool_size; i++) {
nsh->dev[i].page = osh->dev[i].page;
nsh->dev[i].orig_page = osh->dev[i].page;
nsh->dev[i].offset = osh->dev[i].offset;
@@ -2654,7 +2654,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
conf->active_name = 1-conf->active_name;
/* Step 4, return new stripes to service */
- while(!list_empty(&newstripes)) {
+ while (!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
list_del_init(&nsh->lru);
@@ -2675,7 +2675,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
nsh->dev[i].offset = raid5_get_page_offset(nsh, i);
}
#else
- for (i=conf->raid_disks; i < newsize; i++)
+ for (i = conf->raid_disks; i < newsize; i++)
if (nsh->dev[i].page == NULL) {
struct page *p = alloc_page(GFP_NOIO);
nsh->dev[i].page = p;
@@ -2754,7 +2754,7 @@ static void raid5_end_read_request(struct bio * bi)
struct md_rdev *rdev = NULL;
sector_t s;
- for (i=0 ; i<disks; i++)
+ for (i = 0; i < disks; i++)
if (bi == &sh->dev[i].req)
break;
@@ -3026,7 +3026,7 @@ sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
* Select the parity disk based on the user selected algorithm.
*/
pd_idx = qd_idx = -1;
- switch(conf->level) {
+ switch (conf->level) {
case 4:
pd_idx = data_disks;
break;
@@ -3214,7 +3214,7 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
if (i == sh->pd_idx)
return 0;
- switch(conf->level) {
+ switch (conf->level) {
case 4: break;
case 5:
switch (algorithm) {
@@ -3568,7 +3568,7 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
if (forwrite) {
/* check if page is covered */
sector_t sector = sh->dev[dd_idx].sector;
- for (bi=sh->dev[dd_idx].towrite;
+ for (bi = sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
bi && bi->bi_iter.bi_sector <= sector;
bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
@@ -4291,7 +4291,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
}
if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
- int qread =0;
+ int qread = 0;
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -4702,7 +4702,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Now to look around and see what can be done */
rcu_read_lock();
- for (i=disks; i--; ) {
+ for (i = disks; i--; ) {
struct md_rdev *rdev;
sector_t first_bad;
int bad_sectors;
@@ -5426,7 +5426,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
* add bio to the retry LIFO ( in O(1) ... we are in interrupt )
* later sampled by raid5d.
*/
-static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
+static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
unsigned long flags;
@@ -5451,7 +5451,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf,
return bi;
}
bi = conf->retry_read_aligned_list;
- if(bi) {
+ if (bi) {
conf->retry_read_aligned_list = bi->bi_next;
bi->bi_next = NULL;
*offset = 0;
@@ -6328,8 +6328,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0
- || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ !atomic_read(&conf->reshape_stripes) ||
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
if (atomic_read(&conf->reshape_stripes) != 0)
return 0;
mddev->reshape_position = conf->reshape_progress;
@@ -6368,7 +6368,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
/* If any of this stripe is beyond the end of the old
* array, then we need to zero those blocks
*/
- for (j=sh->disks; j--;) {
+ for (j = sh->disks; j--; ) {
sector_t s;
if (j == sh->pd_idx)
continue;
@@ -7400,7 +7400,7 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
unsigned long ret = SHRINK_STOP;
if (mutex_trylock(&conf->cache_size_mutex)) {
- ret= 0;
+ ret = 0;
while (ret < sc->nr_to_scan &&
conf->max_nr_stripes > conf->min_nr_stripes) {
if (drop_one_stripe(conf) == 0) {
@@ -8580,7 +8580,7 @@ static int raid5_start_reshape(struct mddev *mddev)
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
- conf->generation --;
+ conf->generation--;
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
write_seqcount_end(&conf->gen_lock);
@@ -9015,7 +9015,7 @@ static struct md_personality raid6_personality =
.status = raid5_status,
.error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
- .hot_remove_disk= raid5_remove_disk,
+ .hot_remove_disk = raid5_remove_disk,
.spare_active = raid5_spare_active,
.sync_request = raid5_sync_request,
.resize = raid5_resize,
@@ -9039,7 +9039,7 @@ static struct md_personality raid5_personality =
.status = raid5_status,
.error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
- .hot_remove_disk= raid5_remove_disk,
+ .hot_remove_disk = raid5_remove_disk,
.spare_active = raid5_spare_active,
.sync_request = raid5_sync_request,
.resize = raid5_resize,
@@ -9064,7 +9064,7 @@ static struct md_personality raid4_personality =
.status = raid5_status,
.error_handler = raid5_error,
.hot_add_disk = raid5_add_disk,
- .hot_remove_disk= raid5_remove_disk,
+ .hot_remove_disk = raid5_remove_disk,
.spare_active = raid5_spare_active,
.sync_request = raid5_sync_request,
.resize = raid5_resize,
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..c629bfae826f 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -160,7 +160,7 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
#ifndef __KERNEL__
# define jiffies raid6_jiffies()
-# define printk printf
+# define printk printf
# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
# define GFP_KERNEL 0
--
2.39.2
* [PATCH 02/34] md: fix 'foo*' and 'foo * bar' [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
2023-03-06 21:27 ` [PATCH 01/34] md: fix required/prohibited spaces [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 03/34] md: fix EXPORT_SYMBOL() to follow its functions immediately [ERROR] heinzm
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
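The two checkpatch complaints named in the subject are about where the '*'
binds: a pointer declaration or cast is written "foo *bar" / "(foo *)bar",
while "foo * bar" reads as multiplication. A small hypothetical sketch (not
code from this diff):

#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical example: the '*' attaches to the declarator ("foo *bar")
 * and to casts ("(foo *)bar"); "foo * bar" is reserved for multiplication.
 */
static void *dup_buffer(const void *src, size_t len)
{
	unsigned char *dst = malloc(len);	/* not "unsigned char * dst" */

	if (dst)
		memcpy(dst, src, len);

	return (void *)dst;			/* not "(void*)dst" */
}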
drivers/md/md-multipath.c | 4 ++--
drivers/md/md.c | 26 +++++++++++++-------------
drivers/md/md.h | 2 +-
drivers/md/raid1.c | 4 ++--
drivers/md/raid10.c | 12 ++++++------
drivers/md/raid5.c | 6 +++---
include/linux/raid/xor.h | 28 ++++++++++++++--------------
7 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index d772143060bb..932e9fc4b953 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -97,10 +97,10 @@ static void multipath_end_request(struct bio *bio)
rdev_dec_pending(rdev, conf->mddev);
}
-static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
+static bool multipath_make_request(struct mddev *mddev, struct bio *bio)
{
struct mpconf *conf = mddev->private;
- struct multipath_bh * mp_bh;
+ struct multipath_bh *mp_bh;
struct multipath_info *multipath;
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 15be41044d32..8727ebab4b95 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1069,7 +1069,7 @@ static u32 md_csum_fold(u32 csum)
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
u64 newcsum = 0;
- u32 *sb32 = (u32*)sb;
+ u32 *sb32 = (u32 *)sb;
int i;
unsigned int disk_csum, csum;
@@ -1583,7 +1583,7 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
- __le32 *isuper = (__le32*)sb;
+ __le32 *isuper = (__le32 *)sb;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
@@ -1592,7 +1592,7 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
- newcsum += le16_to_cpu(*(__le16*) isuper);
+ newcsum += le16_to_cpu(*(__le16 *) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
@@ -6607,7 +6607,7 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
return 0;
}
-static int get_bitmap_file(struct mddev *mddev, void __user * arg)
+static int get_bitmap_file(struct mddev *mddev, void __user *arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr;
@@ -6639,7 +6639,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
return err;
}
-static int get_disk_info(struct mddev *mddev, void __user * arg)
+static int get_disk_info(struct mddev *mddev, void __user *arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
@@ -8160,7 +8160,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
return NULL;
if (!l--)
/* header */
- return (void*)1;
+ return (void *)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp, &all_mddevs)
@@ -8173,7 +8173,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
}
spin_unlock(&all_mddevs_lock);
if (!l--)
- return (void*)2;/* tail */
+ return (void *)2;/* tail */
return NULL;
}
@@ -8184,11 +8184,11 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct mddev *to_put = NULL;
++*pos;
- if (v == (void*)2)
+ if (v == (void *)2)
return NULL;
spin_lock(&all_mddevs_lock);
- if (v == (void*)1) {
+ if (v == (void *)1) {
tmp = all_mddevs.next;
} else {
to_put = mddev;
@@ -8197,7 +8197,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
for (;;) {
if (tmp == &all_mddevs) {
- next_mddev = (void*)2;
+ next_mddev = (void *)2;
*pos = 0x10000;
break;
}
@@ -8219,7 +8219,7 @@ static void md_seq_stop(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
- if (mddev && v != (void*)1 && v != (void*)2)
+ if (mddev && v != (void *)1 && v != (void *)2)
mddev_put(mddev);
}
@@ -8229,7 +8229,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
sector_t sectors;
struct md_rdev *rdev;
- if (v == (void*)1) {
+ if (v == (void *)1) {
struct md_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
@@ -8241,7 +8241,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
- if (v == (void*)2) {
+ if (v == (void *)2) {
status_unused(seq);
return 0;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 39df217b51be..10fc3da0dafd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -652,7 +652,7 @@ static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
sysfs_notify_dirent(sd);
}
-static inline char * mdname(struct mddev * mddev)
+static inline char *mdname(struct mddev *mddev)
{
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 884983c89491..0701f11a0da8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -127,7 +127,7 @@ static inline struct r1bio *get_resync_r1bio(struct bio *bio)
return get_resync_pages(bio)->raid_bio;
}
-static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
int size = offsetof(struct r1bio, bios[pi->raid_disks]);
@@ -143,7 +143,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
-static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
struct r1bio *r1_bio;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3a5946fa2b90..f95806a5606e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -103,7 +103,7 @@ static inline struct r10bio *get_resync_r10bio(struct bio *bio)
return get_resync_pages(bio)->raid_bio;
}
-static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
@@ -128,7 +128,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
* one for write (we recover only one drive per r10buf)
*
*/
-static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
struct r10bio *r10_bio;
@@ -909,7 +909,7 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
@@ -1124,7 +1124,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void *)bio->bi_bdev;
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
@@ -3485,7 +3485,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
raise_barrier(conf, rb2 != NULL);
atomic_set(&r10_bio->remaining, 0);
- r10_bio->master_bio = (struct bio*)rb2;
+ r10_bio->master_bio = (struct bio *)rb2;
if (rb2)
atomic_inc(&rb2->remaining);
r10_bio->mddev = mddev;
@@ -3662,7 +3662,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (biolist == NULL) {
while (r10_bio) {
struct r10bio *rb2 = r10_bio;
- r10_bio = (struct r10bio*) rb2->master_bio;
+ r10_bio = (struct r10bio *) rb2->master_bio;
rb2->master_bio = NULL;
put_buf(rb2);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a4351ff3fe31..e4dd6304c018 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2746,7 +2746,7 @@ static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev,
lockdep_is_held(&mddev->reconfig_mutex));
}
-static void raid5_end_read_request(struct bio * bi)
+static void raid5_end_read_request(struct bio *bi)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
@@ -5478,7 +5478,7 @@ static void raid5_align_endio(struct bio *bi)
bio_put(bi);
- rdev = (void*)raid_bi->bi_next;
+ rdev = (void *)raid_bi->bi_next;
raid_bi->bi_next = NULL;
mddev = rdev->mddev;
conf = mddev->private;
@@ -6079,7 +6079,7 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
return ret;
}
-static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool raid5_make_request(struct mddev *mddev, struct bio *bi)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct r5conf *conf = mddev->private;
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 51b811b62322..1827a54790d7 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,20 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_3)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_4)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
- void (*do_5)(unsigned long, unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict,
- const unsigned long * __restrict);
+ void (*do_2)(unsigned long, unsigned long *__restrict,
+ const unsigned long *__restrict);
+ void (*do_3)(unsigned long, unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict);
+ void (*do_4)(unsigned long, unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict);
+ void (*do_5)(unsigned long, unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict,
+ const unsigned long *__restrict);
};
#endif
--
2.39.2
* [PATCH 03/34] md: fix EXPORT_SYMBOL() to follow its functions immediately [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
2023-03-06 21:27 ` [PATCH 01/34] md: fix required/prohibited spaces [ERROR] heinzm
2023-03-06 21:27 ` [PATCH 02/34] md: fix 'foo*' and 'foo * bar' [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 04/34] md: adjust braces on functions/structures [ERROR] heinzm
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
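checkpatch expects EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() on the line directly
after the closing brace of the function it exports, with no blank line in
between; this patch only deletes the two offending blank lines. A hypothetical
sketch of the rule (example function name, not from this diff):

#include <linux/export.h>

/* Hypothetical example: the export macro directly follows the function body. */
int example_answer(void)
{
	return 42;
}
EXPORT_SYMBOL_GPL(example_answer);	/* no blank line between '}' and this */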
drivers/md/md.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8727ebab4b95..cfa957c8287b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6271,7 +6271,6 @@ void md_stop(struct mddev *mddev)
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
-
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
@@ -8585,7 +8584,6 @@ void md_write_end(struct mddev *mddev)
roundup(jiffies, mddev->safemode_delay) +
mddev->safemode_delay);
}
-
EXPORT_SYMBOL(md_write_end);
/* This is used by raid0 and raid10 */
--
2.39.2
* [PATCH 04/34] md: adjust braces on functions/structures [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
2023-03-06 21:27 ` [PATCH 03/34] md: fix EXPORT_SYMBOL() to follow its functions immediately [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 05/34] md: correct code indent [ERROR] heinzm
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Also avoid a few superfluous line splits.
Signed-off-by: heinzm <heinzm@redhat.com>
---
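Most of the brace adjustments below put the opening brace of a struct
initializer on the same line as the '=' rather than on a line of its own.
A hypothetical sketch using the same struct and fields as the hunks that
follow (the instance name and values are made up):

/*
 * Hypothetical example: the opening brace of a struct initializer stays on
 * the "... = {" line instead of a line of its own.
 */
static struct md_personality example_personality = {
	.name	= "example",
	.level	= 0,
	.owner	= THIS_MODULE,
};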
drivers/md/md-faulty.c | 3 +--
drivers/md/md-linear.c | 3 +--
drivers/md/md-linear.h | 3 +--
drivers/md/md-multipath.c | 3 +--
drivers/md/md.c | 13 +++++--------
drivers/md/md.h | 3 +--
drivers/md/raid0.c | 6 ++----
drivers/md/raid1.c | 3 +--
drivers/md/raid10.c | 3 +--
drivers/md/raid5.c | 10 ++++------
10 files changed, 18 insertions(+), 32 deletions(-)
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index b228447e1f88..8493432a732e 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -331,8 +331,7 @@ static void faulty_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-static struct md_personality faulty_personality =
-{
+static struct md_personality faulty_personality = {
.name = "faulty",
.level = LEVEL_FAULTY,
.owner = THIS_MODULE,
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c0ad603f37a6..35ee116bf45b 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -274,8 +274,7 @@ static void linear_quiesce(struct mddev *mddev, int state)
{
}
-static struct md_personality linear_personality =
-{
+static struct md_personality linear_personality = {
.name = "linear",
.level = LEVEL_LINEAR,
.owner = THIS_MODULE,
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
index 24e97db50ebb..56906a30a577 100644
--- a/drivers/md/md-linear.h
+++ b/drivers/md/md-linear.h
@@ -7,8 +7,7 @@ struct dev_info {
sector_t end_sector;
};
-struct linear_conf
-{
+struct linear_conf {
struct rcu_head rcu;
sector_t array_sectors;
int raid_disks; /* a copy of mddev->raid_disks */
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 932e9fc4b953..c6c0a76c5210 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -435,8 +435,7 @@ static void multipath_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-static struct md_personality multipath_personality =
-{
+static struct md_personality multipath_personality = {
.name = "multipath",
.level = LEVEL_MULTIPATH,
.owner = THIS_MODULE,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cfa957c8287b..315b0810dbdd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1456,8 +1456,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
- if (mddev->in_sync)
- {
+ if (mddev->in_sync) {
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
@@ -4480,10 +4479,9 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_array_state =
__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
-static ssize_t
-max_corrected_read_errors_show(struct mddev *mddev, char *page) {
- return sprintf(page, "%d\n",
- atomic_read(&mddev->max_corr_read_errors));
+static ssize_t max_corrected_read_errors_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%d\n", atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
@@ -7847,8 +7845,7 @@ static void md_free_disk(struct gendisk *disk)
mddev_free(mddev);
}
-const struct block_device_operations md_fops =
-{
+const struct block_device_operations md_fops = {
.owner = THIS_MODULE,
.submit_bio = md_submit_bio,
.open = md_open,
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10fc3da0dafd..45f8ada8814e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -584,8 +584,7 @@ static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
md_sync_acct(bio->bi_bdev, nr_sectors);
}
-struct md_personality
-{
+struct md_personality {
char *name;
int level;
struct list_head list;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6129ab4d4708..582457cea439 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -206,8 +206,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
curr_zone_end = zone->zone_end;
/* now do the other zones */
- for (i = 1; i < conf->nr_strip_zones; i++)
- {
+ for (i = 1; i < conf->nr_strip_zones; i++) {
int j;
zone = conf->strip_zone + i;
@@ -755,8 +754,7 @@ static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
-static struct md_personality raid0_personality =
-{
+static struct md_personality raid0_personality = {
.name = "raid0",
.level = 0,
.owner = THIS_MODULE,
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0701f11a0da8..415b1dd55baa 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3383,8 +3383,7 @@ static void *raid1_takeover(struct mddev *mddev)
return ERR_PTR(-EINVAL);
}
-static struct md_personality raid1_personality =
-{
+static struct md_personality raid1_personality = {
.name = "raid1",
.level = 1,
.owner = THIS_MODULE,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f95806a5606e..cdc2f2557966 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -5246,8 +5246,7 @@ static void raid10_finish_reshape(struct mddev *mddev)
mddev->reshape_backwards = 0;
}
-static struct md_personality raid10_personality =
-{
+static struct md_personality raid10_personality = {
.name = "raid10",
.level = 10,
.owner = THIS_MODULE,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4dd6304c018..73060e4124b4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -9003,8 +9003,7 @@ static int raid5_start(struct mddev *mddev)
return r5l_start(conf->log);
}
-static struct md_personality raid6_personality =
-{
+static struct md_personality raid6_personality = {
.name = "raid6",
.level = 6,
.owner = THIS_MODULE,
@@ -9027,8 +9026,8 @@ static struct md_personality raid6_personality =
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
};
-static struct md_personality raid5_personality =
-{
+
+static struct md_personality raid5_personality = {
.name = "raid5",
.level = 5,
.owner = THIS_MODULE,
@@ -9052,8 +9051,7 @@ static struct md_personality raid5_personality =
.change_consistency_policy = raid5_change_consistency_policy,
};
-static struct md_personality raid4_personality =
-{
+static struct md_personality raid4_personality = {
.name = "raid4",
.level = 4,
.owner = THIS_MODULE,
--
2.39.2
* [PATCH 05/34] md: correct code indent [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (3 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 04/34] md: adjust braces on functions/structures [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 06/34] md: move trailing statements to next line [ERROR] heinzm
` (29 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 18 +++++++-----------
drivers/md/md.h | 9 +++++----
drivers/md/raid1.c | 5 ++---
drivers/md/raid10.c | 9 ++++-----
drivers/md/raid5.c | 2 +-
include/linux/raid/xor.h | 6 +++---
6 files changed, 22 insertions(+), 27 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 315b0810dbdd..f8d44832339e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -27,12 +27,11 @@
Errors, Warnings, etc.
Please use:
pr_crit() for error conditions that risk data loss
- pr_err() for error conditions that are unexpected, like an IO error
- or internal inconsistency
+ pr_err() for error conditions that are unexpected, like an IO error or internal inconsistency
pr_warn() for error conditions that could have been predicated, like
- adding a device to an array when it has incompatible metadata
+ adding a device to an array when it has incompatible metadata
pr_info() for every interesting, very rare events, like an array starting
- or stopping, or resync starting or stopping
+ or stopping, or resync starting or stopping
pr_debug() for everything else.
*/
@@ -3249,9 +3248,8 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
;
else if (new_offset > rdev->data_offset) {
/* must not push array size beyond rdev_sectors */
- if (new_offset - rdev->data_offset
- + mddev->dev_sectors > rdev->sectors)
- return -E2BIG;
+ if (new_offset - rdev->data_offset + mddev->dev_sectors > rdev->sectors)
+ return -E2BIG;
}
/* Metadata worries about other space details. */
@@ -5824,10 +5822,8 @@ int md_run(struct mddev *mddev)
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
- rdev->data_offset + mddev->dev_sectors
- > rdev->sb_start) {
- pr_warn("md: %s: data overlaps metadata\n",
- mdname(mddev));
+ rdev->data_offset + mddev->dev_sectors > rdev->sb_start) {
+ pr_warn("md: %s: data overlaps metadata\n", mdname(mddev));
return -EINVAL;
}
} else {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 45f8ada8814e..9408cfbd92db 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- md.h : kernel internal structure of the Linux MD driver
- Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
-*/
+ * md.h : kernel internal structure of the Linux MD driver
+ *
+ * Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
+ *
+ */
#ifndef _MD_MD_H
#define _MD_MD_H
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 415b1dd55baa..809a46dbbaef 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1324,12 +1324,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &mirror->rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
+ trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), r1_bio->sector);
submit_bio_noacct(read_bio);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cdc2f2557966..a26a3764b234 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1257,12 +1257,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R10BIO_FailFast, &r10_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
+ trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), r10_bio->sector);
submit_bio_noacct(read_bio);
return;
}
@@ -4448,8 +4447,8 @@ static int raid10_check_reshape(struct mddev *mddev)
return -EINVAL;
if (mddev->array_sectors & geo.chunk_mask)
- /* not factor of array size */
- return -EINVAL;
+ /* not factor of array size */
+ return -EINVAL;
if (!enough(conf, -1))
return -EINVAL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 73060e4124b4..1d5db89acb8d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4145,7 +4145,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
sh = list_first_entry(&sh->batch_list,
struct stripe_head, batch_list);
if (sh != head_sh)
- goto unhash;
+ goto unhash;
}
sh = head_sh;
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 1827a54790d7..231f467935a9 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -8,9 +8,9 @@ extern void xor_blocks(unsigned int count, unsigned int bytes,
void *dest, void **srcs);
struct xor_block_template {
- struct xor_block_template *next;
- const char *name;
- int speed;
+ struct xor_block_template *next;
+ const char *name;
+ int speed;
void (*do_2)(unsigned long, unsigned long *__restrict,
const unsigned long *__restrict);
void (*do_3)(unsigned long, unsigned long *__restrict,
--
2.39.2
* [PATCH 06/34] md: move trailing statements to next line [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (4 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 05/34] md: correct code indent [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-07 20:23 ` kernel test robot
2023-03-06 21:27 ` [PATCH 07/34] md: consistent spacing around operators [ERROR] heinzm
` (28 subsequent siblings)
34 siblings, 1 reply; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Also, add curly braces where appropriate.
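To illustrate the transformation (a minimal sketch with a made-up helper, not a hunk from this series):

        static void bump_if_zero(int *count)
        {
                if (!*count)            /* rather than: if (!*count) (*count)++; */
                        (*count)++;
        }

Once a branch or loop body spans more than one line this way, the surrounding construct also gets braces, so indentation alone no longer carries the structure.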
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-autodetect.c | 3 +-
drivers/md/md-faulty.c | 10 +++++--
drivers/md/md.c | 20 ++++++++-----
drivers/md/md.h | 3 +-
drivers/md/raid10.c | 11 ++++----
drivers/md/raid5.c | 57 +++++++++++++++++++++-----------------
6 files changed, 61 insertions(+), 43 deletions(-)
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 46090cdd02ba..e8acb3021094 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -237,7 +237,8 @@ static int __init raid_setup(char *str)
int wlen;
if (comma)
wlen = (comma-str)-pos;
- else wlen = (len-1)-pos;
+ else
+ wlen = (len-1)-pos;
if (!strncmp(str, "noautodetect", wlen))
raid_noautodetect = 1;
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 8493432a732e..33cb00115777 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -125,10 +125,12 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
int i;
int n = conf->nfaults;
- for (i = 0; i < conf->nfaults; i++)
+ for (i = 0; i < conf->nfaults; i++) {
if (conf->faults[i] == start) {
switch (mode) {
- case NoPersist: conf->modes[i] = mode; return;
+ case NoPersist:
+ conf->modes[i] = mode;
+ return;
case WritePersistent:
if (conf->modes[i] == ReadPersistent ||
conf->modes[i] == ReadFixable)
@@ -152,6 +154,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
}
} else if (conf->modes[i] == NoPersist)
n = i;
+ }
if (n >= MaxFault)
return;
@@ -271,7 +274,8 @@ static int faulty_reshape(struct mddev *mddev)
}
} else if (mode < Modes) {
conf->period[mode] = count;
- if (!count) count++;
+ if (!count)
+ count++;
atomic_set(&conf->counters[mode], count);
} else
return -EINVAL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f8d44832339e..24e55e2cf4db 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1634,7 +1634,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
* and it is safe to read 4k, so we do that
*/
ret = read_disk_sb(rdev, 4096);
- if (ret) return ret;
+ if (ret)
+ return ret;
sb = page_address(rdev->sb_page);
@@ -4599,13 +4600,16 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
- if (buf == end) break;
+ if (buf == end)
+ break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
- if (buf == end) break;
+ if (buf == end)
+ break;
}
- if (*end && !isspace(*end)) break;
+ if (*end && !isspace(*end))
+ break;
md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
@@ -4975,7 +4979,8 @@ sync_speed_show(struct mddev *mddev, char *page)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
- if (!dt) dt++;
+ if (!dt)
+ dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
@@ -7525,7 +7530,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case RAID_VERSION:
err = get_version(argp);
goto out;
- default:;
+ default:
}
/*
@@ -8117,7 +8122,8 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
* The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
- if (!dt) dt++;
+ if (!dt)
+ dt++;
curr_mark_cnt = mddev->curr_mark_cnt;
recovery_active = atomic_read(&mddev->recovery_active);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 9408cfbd92db..a885bbcebe2d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -720,7 +720,8 @@ struct md_io_acct {
static inline void safe_put_page(struct page *p)
{
- if (p) put_page(p);
+ if (p)
+ put_page(p);
}
extern int register_md_personality(struct md_personality *p);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a26a3764b234..7a15f794b839 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3335,11 +3335,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
- else for (i = 0; i < conf->geo.raid_disks; i++) {
- sector_t sect =
- raid10_find_virt(conf, mddev->curr_resync, i);
- md_bitmap_end_sync(mddev->bitmap, sect,
- &sync_blocks, 1);
+ else {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ sector_t sect =
+ raid10_find_virt(conf, mddev->curr_resync, i);
+ md_bitmap_end_sync(mddev->bitmap, sect, &sync_blocks, 1);
+ }
}
} else {
/* completed sync */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1d5db89acb8d..00151c850a35 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3215,7 +3215,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
if (i == sh->pd_idx)
return 0;
switch (conf->level) {
- case 4: break;
+ case 4:
+ break;
case 5:
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
@@ -3712,7 +3713,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].page = sh->dev[i].orig_page;
}
- if (bi) bitmap_end = 1;
+ if (bi)
+ bitmap_end = 1;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
@@ -4202,30 +4204,33 @@ static int handle_stripe_dirtying(struct r5conf *conf,
pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
conf->rmw_level, (unsigned long long)recovery_cp,
(unsigned long long)sh->sector);
- } else for (i = disks; i--; ) {
- /* would I have to read this buffer for read_modify_write */
- struct r5dev *dev = &sh->dev[i];
- if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
- i == sh->pd_idx || i == sh->qd_idx ||
- test_bit(R5_InJournal, &dev->flags)) &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- !(uptodate_for_rmw(dev) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
- if (test_bit(R5_Insync, &dev->flags))
- rmw++;
- else
- rmw += 2*disks; /* cannot read it */
- }
- /* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags) &&
- i != sh->pd_idx && i != sh->qd_idx &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
- if (test_bit(R5_Insync, &dev->flags))
- rcw++;
- else
- rcw += 2*disks;
+ } else {
+ for (i = disks; i--; ) {
+ /* would I have to read this buffer for read_modify_write */
+ struct r5dev *dev = &sh->dev[i];
+ if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
+ i == sh->pd_idx || i == sh->qd_idx ||
+ test_bit(R5_InJournal, &dev->flags)) &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(uptodate_for_rmw(dev) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ if (test_bit(R5_Insync, &dev->flags))
+ rmw++;
+ else
+ rmw += 2*disks; /* cannot read it */
+ }
+
+ /* Would I have to read this buffer for reconstruct_write */
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != sh->pd_idx && i != sh->qd_idx &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ if (test_bit(R5_Insync, &dev->flags))
+ rcw++;
+ else
+ rcw += 2*disks;
+ }
}
}
--
2.39.2
* Re: [PATCH 06/34] md: move trailing statements to next line [ERROR]
2023-03-06 21:27 ` [PATCH 06/34] md: move trailing statements to next line [ERROR] heinzm
@ 2023-03-07 20:23 ` kernel test robot
0 siblings, 0 replies; 42+ messages in thread
From: kernel test robot @ 2023-03-07 20:23 UTC (permalink / raw)
To: heinzm, linux-raid; +Cc: llvm, oe-kbuild-all, ncroxon, xni, dkeefe
Hi,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on song-md/md-next]
[also build test ERROR on linus/master v6.3-rc1 next-20230307]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/heinzm-redhat-com/md-fix-required-prohibited-spaces-ERROR/20230307-053327
base: git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git md-next
patch link: https://lore.kernel.org/r/12a6970ce1bf7489aa67a3c6d70438a48b8f8987.1678136717.git.heinzm%40redhat.com
patch subject: [PATCH 06/34] md: move trailing statements to next line [ERROR]
config: i386-randconfig-a011-20230306 (https://download.01.org/0day-ci/archive/20230308/202303080447.5GUM9IKU-lkp@intel.com/config)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/0ad2607399ded916c63c96e5e3ac18f74e8a74d2
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review heinzm-redhat-com/md-fix-required-prohibited-spaces-ERROR/20230307-053327
git checkout 0ad2607399ded916c63c96e5e3ac18f74e8a74d2
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 olddefconfig
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202303080447.5GUM9IKU-lkp@intel.com/
All errors (new ones prefixed by >>):
>> drivers/md/md.c:7533:10: error: label at end of compound statement: expected statement
default:
^
;
1 error generated.
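For context, in the C dialect the kernel is built with (and with clang 14 here), a label must be followed by a statement, so a bare default: immediately before the closing brace of a switch is invalid; the original default:; satisfied that rule with an empty statement. A sketch of two possible fixes (assumptions, not necessarily the change that was later applied):

        switch (cmd) {
        case RAID_VERSION:
                err = get_version(argp);
                goto out;
        default:
                break;          /* the label is now followed by a statement */
        }

or the empty default case can simply be dropped, since commands other than RAID_VERSION already fall past the switch unchanged.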
vim +7533 drivers/md/md.c
7503
7504 static int md_ioctl(struct block_device *bdev, fmode_t mode,
7505 unsigned int cmd, unsigned long arg)
7506 {
7507 int err = 0;
7508 void __user *argp = (void __user *)arg;
7509 struct mddev *mddev = NULL;
7510 bool did_set_md_closing = false;
7511
7512 if (!md_ioctl_valid(cmd))
7513 return -ENOTTY;
7514
7515 switch (cmd) {
7516 case RAID_VERSION:
7517 case GET_ARRAY_INFO:
7518 case GET_DISK_INFO:
7519 break;
7520 default:
7521 if (!capable(CAP_SYS_ADMIN))
7522 return -EACCES;
7523 }
7524
7525 /*
7526 * Commands dealing with the RAID driver but not any
7527 * particular array:
7528 */
7529 switch (cmd) {
7530 case RAID_VERSION:
7531 err = get_version(argp);
7532 goto out;
> 7533 default:
7534 }
7535
7536 /*
7537 * Commands creating/starting a new array:
7538 */
7539
7540 mddev = bdev->bd_disk->private_data;
7541
7542 if (!mddev) {
7543 BUG();
7544 goto out;
7545 }
7546
7547 /* Some actions do not requires the mutex */
7548 switch (cmd) {
7549 case GET_ARRAY_INFO:
7550 if (!mddev->raid_disks && !mddev->external)
7551 err = -ENODEV;
7552 else
7553 err = get_array_info(mddev, argp);
7554 goto out;
7555
7556 case GET_DISK_INFO:
7557 if (!mddev->raid_disks && !mddev->external)
7558 err = -ENODEV;
7559 else
7560 err = get_disk_info(mddev, argp);
7561 goto out;
7562
7563 case SET_DISK_FAULTY:
7564 err = set_disk_faulty(mddev, new_decode_dev(arg));
7565 goto out;
7566
7567 case GET_BITMAP_FILE:
7568 err = get_bitmap_file(mddev, argp);
7569 goto out;
7570
7571 }
7572
7573 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
7574 flush_rdev_wq(mddev);
7575
7576 if (cmd == HOT_REMOVE_DISK)
7577 /* need to ensure recovery thread has run */
7578 wait_event_interruptible_timeout(mddev->sb_wait,
7579 !test_bit(MD_RECOVERY_NEEDED,
7580 &mddev->recovery),
7581 msecs_to_jiffies(5000));
7582 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7583 /* Need to flush page cache, and ensure no-one else opens
7584 * and writes
7585 */
7586 mutex_lock(&mddev->open_mutex);
7587 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7588 mutex_unlock(&mddev->open_mutex);
7589 err = -EBUSY;
7590 goto out;
7591 }
7592 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7593 mutex_unlock(&mddev->open_mutex);
7594 err = -EBUSY;
7595 goto out;
7596 }
7597 did_set_md_closing = true;
7598 mutex_unlock(&mddev->open_mutex);
7599 sync_blockdev(bdev);
7600 }
7601 err = mddev_lock(mddev);
7602 if (err) {
7603 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7604 err, cmd);
7605 goto out;
7606 }
7607
7608 if (cmd == SET_ARRAY_INFO) {
7609 err = __md_set_array_info(mddev, argp);
7610 goto unlock;
7611 }
7612
7613 /*
7614 * Commands querying/configuring an existing array:
7615 */
7616 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7617 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7618 if ((!mddev->raid_disks && !mddev->external)
7619 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7620 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7621 && cmd != GET_BITMAP_FILE) {
7622 err = -ENODEV;
7623 goto unlock;
7624 }
7625
7626 /*
7627 * Commands even a read-only array can execute:
7628 */
7629 switch (cmd) {
7630 case RESTART_ARRAY_RW:
7631 err = restart_array(mddev);
7632 goto unlock;
7633
7634 case STOP_ARRAY:
7635 err = do_md_stop(mddev, 0, bdev);
7636 goto unlock;
7637
7638 case STOP_ARRAY_RO:
7639 err = md_set_readonly(mddev, bdev);
7640 goto unlock;
7641
7642 case HOT_REMOVE_DISK:
7643 err = hot_remove_disk(mddev, new_decode_dev(arg));
7644 goto unlock;
7645
7646 case ADD_NEW_DISK:
7647 /* We can support ADD_NEW_DISK on read-only arrays
7648 * only if we are re-adding a preexisting device.
7649 * So require mddev->pers and MD_DISK_SYNC.
7650 */
7651 if (mddev->pers) {
7652 mdu_disk_info_t info;
7653 if (copy_from_user(&info, argp, sizeof(info)))
7654 err = -EFAULT;
7655 else if (!(info.state & (1<<MD_DISK_SYNC)))
7656 /* Need to clear read-only for this */
7657 break;
7658 else
7659 err = md_add_new_disk(mddev, &info);
7660 goto unlock;
7661 }
7662 break;
7663 }
7664
7665 /*
7666 * The remaining ioctls are changing the state of the
7667 * superblock, so we do not allow them on read-only arrays.
7668 */
7669 if (!md_is_rdwr(mddev) && mddev->pers) {
7670 if (mddev->ro != MD_AUTO_READ) {
7671 err = -EROFS;
7672 goto unlock;
7673 }
7674 mddev->ro = MD_RDWR;
7675 sysfs_notify_dirent_safe(mddev->sysfs_state);
7676 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7677 /* mddev_unlock will wake thread */
7678 /* If a device failed while we were read-only, we
7679 * need to make sure the metadata is updated now.
7680 */
7681 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7682 mddev_unlock(mddev);
7683 wait_event(mddev->sb_wait,
7684 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7685 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7686 mddev_lock_nointr(mddev);
7687 }
7688 }
7689
7690 switch (cmd) {
7691 case ADD_NEW_DISK:
7692 {
7693 mdu_disk_info_t info;
7694 if (copy_from_user(&info, argp, sizeof(info)))
7695 err = -EFAULT;
7696 else
7697 err = md_add_new_disk(mddev, &info);
7698 goto unlock;
7699 }
7700
7701 case CLUSTERED_DISK_NACK:
7702 if (mddev_is_clustered(mddev))
7703 md_cluster_ops->new_disk_ack(mddev, false);
7704 else
7705 err = -EINVAL;
7706 goto unlock;
7707
7708 case HOT_ADD_DISK:
7709 err = hot_add_disk(mddev, new_decode_dev(arg));
7710 goto unlock;
7711
7712 case RUN_ARRAY:
7713 err = do_md_run(mddev);
7714 goto unlock;
7715
7716 case SET_BITMAP_FILE:
7717 err = set_bitmap_file(mddev, (int)arg);
7718 goto unlock;
7719
7720 default:
7721 err = -EINVAL;
7722 goto unlock;
7723 }
7724
7725 unlock:
7726 if (mddev->hold_active == UNTIL_IOCTL &&
7727 err != -EINVAL)
7728 mddev->hold_active = 0;
7729 mddev_unlock(mddev);
7730 out:
7731 if (did_set_md_closing)
7732 clear_bit(MD_CLOSING, &mddev->flags);
7733 return err;
7734 }
7735 #ifdef CONFIG_COMPAT
7736 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7737 unsigned int cmd, unsigned long arg)
7738 {
7739 switch (cmd) {
7740 case HOT_REMOVE_DISK:
7741 case HOT_ADD_DISK:
7742 case SET_DISK_FAULTY:
7743 case SET_BITMAP_FILE:
7744 /* These take in integer arg, do not convert */
7745 break;
7746 default:
7747 arg = (unsigned long)compat_ptr(arg);
7748 break;
7749 }
7750
7751 return md_ioctl(bdev, mode, cmd, arg);
7752 }
7753 #endif /* CONFIG_COMPAT */
7754
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests
* [PATCH 07/34] md: consistent spacing around operators [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (5 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 06/34] md: move trailing statements to next line [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 08/34] md: don't initilize statics/globals to 0/false [ERROR] heinzm
` (27 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
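The rule applied here is one space on each side of binary operators, so an expression reads the way it parses; a minimal sketch with made-up names:

        static int scaled_sum(int a, int b)
        {
                return a * b + 1;       /* rather than: a*b +1 */
        }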
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 24e55e2cf4db..e6ff0da6ebb6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -111,7 +111,7 @@ static bool md_is_rdwr(struct mddev *mddev)
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
-#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
+#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 + 1)
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
@@ -1460,7 +1460,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
- sb->state = (1<< MD_SB_CLEAN);
+ sb->state = (1<<MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
@@ -9011,8 +9011,8 @@ void md_do_sync(struct md_thread *thread)
cond_resched();
recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
- currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
- /((jiffies-mddev->resync_mark)/HZ +1) +1;
+ currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt)) / 2
+ / ((jiffies-mddev->resync_mark)/HZ + 1) + 1;
if (currspeed > speed_min(mddev)) {
if (currspeed > speed_max(mddev)) {
--
2.39.2
* [PATCH 08/34] md: don't initilize statics/globals to 0/false [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (6 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 07/34] md: consistent spacing around operators [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-07 9:42 ` Paul Menzel
2023-03-06 21:27 ` [PATCH 09/34] md: else should follow close curly brace [ERROR] heinzm
` (26 subsequent siblings)
34 siblings, 1 reply; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
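For context, C zero-initializes objects with static storage duration before anything else runs, so an explicit '= 0' or '= false' on a static or global is redundant and checkpatch flags it; after this patch the declarations below simply read:

        static int default_layout;                      /* still starts out as 0 */
        static bool devices_handle_discard_safely;      /* still starts out as false */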
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 2 +-
drivers/md/raid0.c | 2 +-
drivers/md/raid5.c | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e6ff0da6ebb6..9dc1df40c52d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5563,7 +5563,7 @@ static struct kobj_type md_ktype = {
.default_groups = md_attr_groups,
};
-int mdp_major = 0;
+int mdp_major;
static void mddev_delayed_delete(struct work_struct *ws)
{
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 582457cea439..11b9815f153d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -19,7 +19,7 @@
#include "raid0.h"
#include "raid5.h"
-static int default_layout = 0;
+static int default_layout;
module_param(default_layout, int, 0644);
#define UNSUPPORTED_MDDEV_FLAGS \
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 00151c850a35..d0b6a97200fa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -64,7 +64,7 @@
#define RAID5_MAX_REQ_STRIPES 256
-static bool devices_handle_discard_safely = false;
+static bool devices_handle_discard_safely;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
--
2.39.2
* Re: [PATCH 08/34] md: don't initilize statics/globals to 0/false [ERROR]
2023-03-06 21:27 ` [PATCH 08/34] md: don't initilize statics/globals to 0/false [ERROR] heinzm
@ 2023-03-07 9:42 ` Paul Menzel
0 siblings, 0 replies; 42+ messages in thread
From: Paul Menzel @ 2023-03-07 9:42 UTC (permalink / raw)
To: Heinz Mauelshagen; +Cc: linux-raid, ncroxon, xni, dkeefe
Dear Heinz,
Thank you for your patch.
On 06.03.23 at 22:27, heinzm@redhat.com wrote:
> From: Heinz Mauelshagen <heinzm@redhat.com>
There is a small typo in the commit message summary: initi*a*lize.
> Signed-off-by: heinzm <heinzm@redhat.com>
Please also use the full name in the Signed-off-by line.
> ---
> drivers/md/md.c | 2 +-
> drivers/md/raid0.c | 2 +-
> drivers/md/raid5.c | 2 +-
> 3 files changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index e6ff0da6ebb6..9dc1df40c52d 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -5563,7 +5563,7 @@ static struct kobj_type md_ktype = {
> .default_groups = md_attr_groups,
> };
>
> -int mdp_major = 0;
> +int mdp_major;
>
> static void mddev_delayed_delete(struct work_struct *ws)
> {
> diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
> index 582457cea439..11b9815f153d 100644
> --- a/drivers/md/raid0.c
> +++ b/drivers/md/raid0.c
> @@ -19,7 +19,7 @@
> #include "raid0.h"
> #include "raid5.h"
>
> -static int default_layout = 0;
> +static int default_layout;
> module_param(default_layout, int, 0644);
>
> #define UNSUPPORTED_MDDEV_FLAGS \
> diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
> index 00151c850a35..d0b6a97200fa 100644
> --- a/drivers/md/raid5.c
> +++ b/drivers/md/raid5.c
> @@ -64,7 +64,7 @@
>
> #define RAID5_MAX_REQ_STRIPES 256
>
> -static bool devices_handle_discard_safely = false;
> +static bool devices_handle_discard_safely;
> module_param(devices_handle_discard_safely, bool, 0644);
> MODULE_PARM_DESC(devices_handle_discard_safely,
> "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Kind regards,
Paul
* [PATCH 09/34] md: else should follow close curly brace [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (7 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 08/34] md: don't initilize statics/globals to 0/false [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-07 9:45 ` Paul Menzel
2023-03-06 21:27 ` [PATCH 10/34] md: remove trailing whitespace [ERROR] heinzm
` (25 subsequent siblings)
34 siblings, 1 reply; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
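The error addressed here is about placing the else on the same line as the closing brace of the preceding block; a minimal sketch with made-up names:

        if (found) {
                hits++;
                total++;
        } else
                misses++;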
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-cluster.c | 3 +--
drivers/md/md.c | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 9bcf816b80a1..760b3ba37854 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -548,8 +548,7 @@ static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
set_bit(ClusterRemove, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- }
- else
+ } else
pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
__func__, __LINE__, le32_to_cpu(msg->raid_slot));
rcu_read_unlock();
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9dc1df40c52d..ff4699babdd6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9694,8 +9694,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
rdev2->bdev);
md_kick_rdev_from_array(rdev2);
continue;
- }
- else
+ } else
clear_bit(Candidate, &rdev2->flags);
}
--
2.39.2
* Re: [PATCH 09/34] md: else should follow close curly brace [ERROR]
2023-03-06 21:27 ` [PATCH 09/34] md: else should follow close curly brace [ERROR] heinzm
@ 2023-03-07 9:45 ` Paul Menzel
0 siblings, 0 replies; 42+ messages in thread
From: Paul Menzel @ 2023-03-07 9:45 UTC (permalink / raw)
To: Heinz Mauelshagen; +Cc: linux-raid, ncroxon, xni, dkeefe
Dear Heinz,
Thank you for your patch.
On 06.03.23 at 22:27, heinzm@redhat.com wrote:
> From: Heinz Mauelshagen <heinzm@redhat.com>
I’d prefer a statement as the commit message summary, which means adding a verb (in imperative mood):
md: Let else follow close curly brace [ERROR]
> Signed-off-by: heinzm <heinzm@redhat.com>
> ---
> drivers/md/md-cluster.c | 3 +--
> drivers/md/md.c | 3 +--
> 2 files changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
> index 9bcf816b80a1..760b3ba37854 100644
> --- a/drivers/md/md-cluster.c
> +++ b/drivers/md/md-cluster.c
> @@ -548,8 +548,7 @@ static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
> set_bit(ClusterRemove, &rdev->flags);
> set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
> md_wakeup_thread(mddev->thread);
> - }
> - else
> + } else
> pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
> __func__, __LINE__, le32_to_cpu(msg->raid_slot));
> rcu_read_unlock();
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 9dc1df40c52d..ff4699babdd6 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -9694,8 +9694,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
> rdev2->bdev);
> md_kick_rdev_from_array(rdev2);
> continue;
> - }
> - else
> + } else
> clear_bit(Candidate, &rdev2->flags);
> }
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Kind regards,
Paul
* [PATCH 10/34] md: remove trailing whitespace [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (8 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 09/34] md: else should follow close curly brace [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 11/34] md: do not use assignment in if condition [ERROR] heinzm
` (24 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 37 +++++++++++++++++++------------------
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ff4699babdd6..132979e597dd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2156,28 +2156,29 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
- } else if (rdev->mddev->bitmap_info.offset) {
- /* minor version 0 with bitmap we can't move */
- return 0;
} else {
- /* minor version 0; superblock after data */
- sector_t sb_start, bm_space;
- sector_t dev_size = bdev_nr_sectors(rdev->bdev);
+ if (!rdev->mddev->bitmap_info.offset) {
+ /* minor version 0; superblock after data */
+ sector_t sb_start, bm_space;
+ sector_t dev_size = bdev_nr_sectors(rdev->bdev);
- /* 8K is for superblock */
- sb_start = dev_size - 8*2;
- sb_start &= ~(sector_t)(4*2 - 1);
+ /* 8K is for superblock */
+ sb_start = dev_size - 8*2;
+ sb_start &= ~(sector_t)(4*2 - 1);
- bm_space = super_1_choose_bm_space(dev_size);
+ bm_space = super_1_choose_bm_space(dev_size);
- /* Space that can be used to store date needs to decrease
- * superblock bitmap space and bad block space(4K)
- */
- max_sectors = sb_start - bm_space - 4*2;
+ /* Space that can be used to store date needs to decrease
+ * superblock bitmap space and bad block space(4K)
+ */
+ max_sectors = sb_start - bm_space - 4*2;
- if (!num_sectors || num_sectors > max_sectors)
- num_sectors = max_sectors;
- rdev->sb_start = sb_start;
+ if (!num_sectors || num_sectors > max_sectors)
+ num_sectors = max_sectors;
+ rdev->sb_start = sb_start;
+ } else
+ /* minor version 0 with bitmap we can't move */
+ return 0;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
@@ -4608,7 +4609,7 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
if (buf == end)
break;
}
- if (*end && !isspace(*end))
+ if (*end && !isspace(*end))
break;
md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
--
2.39.2
* [PATCH 11/34] md: do not use assignment in if condition [ERROR]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (9 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 10/34] md: remove trailing whitespace [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 12/34] md: add missing blank line after declaration [WARNING] heinzm
` (23 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
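The reasoning behind this error is that an assignment buried in an if condition is easy to misread as a comparison and hides the call being tested; pulling it out also gives the error handling its own statement. A minimal sketch with a made-up helper:

        err = setup_thing();            /* rather than: if ((err = setup_thing())) ... */
        if (err)
                return err;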
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-faulty.c | 17 +++++++++++------
drivers/md/md-multipath.c | 9 ++++++---
drivers/md/md.c | 36 ++++++++++++++++++++++--------------
drivers/md/raid10.c | 10 ++++++----
drivers/md/raid5.c | 5 +++--
5 files changed, 48 insertions(+), 29 deletions(-)
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 33cb00115777..7ac286e28fcb 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -226,28 +226,33 @@ static void faulty_status(struct seq_file *seq, struct mddev *mddev)
struct faulty_conf *conf = mddev->private;
int n;
- if ((n = atomic_read(&conf->counters[WriteTransient])))
+ n = atomic_read(&conf->counters[WriteTransient]);
+ if (n)
seq_printf(seq, " WriteTransient=%d(%d)",
n, conf->period[WriteTransient]);
- if ((n = atomic_read(&conf->counters[ReadTransient])))
+ n = atomic_read(&conf->counters[ReadTransient]);
+ if (n)
seq_printf(seq, " ReadTransient=%d(%d)",
n, conf->period[ReadTransient]);
- if ((n = atomic_read(&conf->counters[WritePersistent])))
+ n = atomic_read(&conf->counters[WritePersistent]);
+ if (n)
seq_printf(seq, " WritePersistent=%d(%d)",
n, conf->period[WritePersistent]);
- if ((n = atomic_read(&conf->counters[ReadPersistent])))
+ n = atomic_read(&conf->counters[ReadPersistent]);
+ if (n)
seq_printf(seq, " ReadPersistent=%d(%d)",
n, conf->period[ReadPersistent]);
- if ((n = atomic_read(&conf->counters[ReadFixable])))
+ n = atomic_read(&conf->counters[ReadFixable]);
+ if (n)
seq_printf(seq, " ReadFixable=%d(%d)",
n, conf->period[ReadFixable]);
- if ((n = atomic_read(&conf->counters[WriteAll])))
+ if (atomic_read(&conf->counters[WriteAll]))
seq_printf(seq, " WriteAll");
seq_printf(seq, " nfaults=%d", conf->nfaults);
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index c6c0a76c5210..dd180199479b 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -216,8 +216,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
print_multipath_conf(conf);
- for (path = first; path <= last; path++)
- if (!(p = conf->multipaths+path)->rdev) {
+ for (path = first; path <= last; path++) {
+ p = conf->multipaths+path;
+ if (!p->rdev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -233,6 +234,7 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
err = 0;
break;
}
+ }
print_multipath_conf(conf);
@@ -302,7 +304,8 @@ static void multipathd(struct md_thread *thread)
bio = &mp_bh->bio;
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
- if ((mp_bh->path = multipath_map(conf)) < 0) {
+ mp_bh->path = multipath_map(conf);
+ if (mp_bh->path < 0) {
pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
bio->bi_bdev,
(unsigned long long)bio->bi_iter.bi_sector);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 132979e597dd..a48fbc80fc64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2436,7 +2436,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
if (mddev->raid_disks)
mddev_create_serial_pool(mddev, rdev, false);
- if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
+ err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b);
+ if (err)
goto fail;
/* failure here is OK */
@@ -3059,8 +3060,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* will land in the local bitmap, which will be synced
* by this node eventually
*/
- if (!mddev_is_clustered(rdev->mddev) ||
- (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ if (mddev_is_clustered(rdev->mddev))
+ err = md_cluster_ops->gather_bitmaps(rdev);
+ else
+ err = 0;
+
+ if (!err) {
clear_bit(Faulty, &rdev->flags);
err = add_bound_rdev(rdev);
}
@@ -9401,17 +9406,20 @@ void md_check_recovery(struct mddev *mddev)
goto not_running;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- } else if ((spares = remove_and_add_spares(mddev, NULL))) {
- clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- } else if (mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- /* nothing to be done ... */
- goto not_running;
+ } else {
+ spares = remove_and_add_spares(mddev, NULL);
+ if (spares) {
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ } else if (mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ /* nothing to be done ... */
+ goto not_running;
+ }
if (mddev->pers->sync_request) {
if (spares) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7a15f794b839..8f3339e73f55 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1982,10 +1982,12 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
int this = first;
while (n--) {
struct md_rdev *rdev;
- if (this != ignore &&
- (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
- test_bit(In_sync, &rdev->flags))
- cnt++;
+ if (this != ignore) {
+ rdev = rcu_dereference(conf->mirrors[this].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags))
+ cnt++;
+ }
+
this = (this+1) % disks;
}
if (cnt == 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d0b6a97200fa..4bdfbe1f8fcf 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -549,9 +549,10 @@ static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
for (i = 0; i < num; i++) {
struct page *page;
- if (!(page = alloc_page(gfp))) {
+ page = alloc_page(gfp);
+ if (!page)
return 1;
- }
+
sh->dev[i].page = page;
sh->dev[i].orig_page = page;
sh->dev[i].offset = 0;
--
2.39.2
* [PATCH 12/34] md: add missing blank line after declaration [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (10 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 11/34] md: do not use assignment in if condition [ERROR] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 13/34] md: space prohibited between function and opening parenthesis [WARNING] heinzm
` (22 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
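The warning addressed here asks for a blank line between the last local declaration and the first statement of a block, keeping declarations visually separate from code; a minimal sketch with made-up names:

        struct counter { int value; };

        static void bump(struct counter *c)
        {
                int n = c->value;

                if (!n)                 /* the blank line above is what checkpatch wants */
                        n = 1;
                c->value = n;
        }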
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-autodetect.c | 1 +
drivers/md/md-bitmap.c | 26 +++++++++++++++++
drivers/md/md-cluster.c | 7 +++++
drivers/md/md-faulty.c | 3 ++
drivers/md/md-multipath.c | 3 ++
drivers/md/md.c | 49 +++++++++++++++++++++++++++++++-
drivers/md/raid0.c | 1 +
drivers/md/raid1.c | 32 ++++++++++++++++++++-
drivers/md/raid10.c | 44 +++++++++++++++++++++++++++++
drivers/md/raid5-cache.c | 1 +
drivers/md/raid5-ppl.c | 1 +
drivers/md/raid5.c | 58 ++++++++++++++++++++++++++++++++++++--
include/linux/raid/pq.h | 1 +
13 files changed, 222 insertions(+), 5 deletions(-)
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index e8acb3021094..ff60c2272919 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -235,6 +235,7 @@ static int __init raid_setup(char *str)
while (pos < len) {
char *comma = strchr(str+pos, ',');
int wlen;
+
if (comma)
wlen = (comma-str)-pos;
else
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index f2192aa8b826..3cee70340024 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -226,6 +226,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
if (page->index == store->file_pages-1) {
int last_page_size = store->bytes & (PAGE_SIZE-1);
+
if (last_page_size == 0)
last_page_size = PAGE_SIZE;
size = roundup(last_page_size,
@@ -333,6 +334,7 @@ static void free_buffers(struct page *page)
bh = page_buffers(page);
while (bh) {
struct buffer_head *next = bh->b_this_page;
+
free_buffer_head(bh);
bh = next;
}
@@ -849,6 +851,7 @@ static void md_bitmap_file_unmap(struct bitmap_storage *store)
if (file) {
struct inode *inode = file_inode(file);
+
invalidate_mapping_pages(inode->i_mapping, 0, -1);
fput(file);
}
@@ -1102,6 +1105,7 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
for (i = 0; i < chunks; i++) {
int b;
+
index = file_page_index(&bitmap->storage, i);
bit = file_page_offset(&bitmap->storage, i);
if (index != oldindex) { /* this is a new page, read it in */
@@ -1198,6 +1202,7 @@ static void md_bitmap_count_page(struct bitmap_counts *bitmap,
{
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
+
bitmap->bp[page].count += inc;
md_bitmap_checkfree(bitmap, page);
}
@@ -1268,6 +1273,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
/* Arrange for superblock update as well as
* other changes */
bitmap_super_t *sb;
+
bitmap->need_sync = 0;
if (bitmap->storage.filemap) {
sb = kmap_atomic(bitmap->storage.sb_page);
@@ -1381,6 +1387,7 @@ __acquires(bitmap->lock)
/* should we use the first or second counter field
* of the hijacked pointer? */
int hi = (pageoff > PAGE_COUNTER_MASK);
+
return &((bitmap_counter_t *)
&bitmap->bp[page].map)[hi];
} else /* page is allocated */
@@ -1395,6 +1402,7 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
if (behind) {
int bw;
+
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
@@ -1510,6 +1518,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
{
bitmap_counter_t *bmc;
int rv;
+
if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
*blocks = 1024;
return 1; /* always resync if no bitmap */
@@ -1596,6 +1605,7 @@ void md_bitmap_close_sync(struct bitmap *bitmap)
*/
sector_t sector = 0;
sector_t blocks;
+
if (!bitmap)
return;
while (sector < bitmap->mddev->resync_max_sectors) {
@@ -1665,6 +1675,7 @@ static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, in
sector_t secs;
bitmap_counter_t *bmc;
+
spin_lock_irq(&bitmap->counts.lock);
bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
if (!bmc) {
@@ -1689,6 +1700,7 @@ void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
+
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
if (sec < bitmap->mddev->recovery_cp)
@@ -1917,6 +1929,7 @@ int md_bitmap_load(struct mddev *mddev)
*/
while (sector < mddev->resync_max_sectors) {
sector_t blocks;
+
md_bitmap_start_sync(bitmap, sector, &blocks, 0);
sector += blocks;
}
@@ -2158,6 +2171,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
/* For cluster raid, need to pre-allocate bitmap */
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
+
for (page = 0; page < pages; page++) {
ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
if (ret) {
@@ -2219,6 +2233,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (bitmap->counts.bp != old_counts.bp) {
unsigned long k;
+
for (k = 0; k < old_counts.pages; k++)
if (!old_counts.bp[k].hijacked)
kfree(old_counts.bp[k].map);
@@ -2227,8 +2242,10 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (!init) {
int i;
+
while (block < (chunks << chunkshift)) {
bitmap_counter_t *bmc;
+
bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
if (bmc) {
/* new space. It needs to be resynced, so
@@ -2261,6 +2278,7 @@ static ssize_t
location_show(struct mddev *mddev, char *page)
{
ssize_t len;
+
if (mddev->bitmap_info.file)
len = sprintf(page, "file");
else if (mddev->bitmap_info.offset)
@@ -2305,12 +2323,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
+
mddev->bitmap_info.file = NULL;
fput(f);
}
} else {
/* No bitmap, OK to set a location */
long long offset;
+
if (strncmp(buf, "none", 4) == 0)
/* nothing to be done */;
else if (strncmp(buf, "file:", 5) == 0) {
@@ -2337,6 +2357,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
+
bitmap = md_bitmap_create(mddev, -1);
mddev_suspend(mddev);
if (IS_ERR(bitmap))
@@ -2431,6 +2452,7 @@ timeout_store(struct mddev *mddev, const char *buf, size_t len)
/* timeout can be set at any time */
unsigned long timeout;
int rv = strict_strtoul_scaled(buf, &timeout, 4);
+
if (rv)
return rv;
@@ -2475,6 +2497,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
struct md_rdev *rdev;
bool has_write_mostly = false;
int rv = kstrtoul(buf, 10, &backlog);
+
if (rv)
return rv;
if (backlog > COUNTER_MAX)
@@ -2528,6 +2551,7 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
/* Can only be changed when no bitmap is active */
int rv;
unsigned long csize;
+
if (mddev->bitmap)
return -EBUSY;
rv = kstrtoul(buf, 10, &csize);
@@ -2576,6 +2600,7 @@ __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
int len;
+
spin_lock(&mddev->lock);
if (mddev->bitmap)
len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
@@ -2608,6 +2633,7 @@ static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
ssize_t ret;
+
spin_lock(&mddev->lock);
if (mddev->bitmap == NULL)
ret = sprintf(page, "0\n");
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 760b3ba37854..b61b1fba1c77 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -353,6 +353,7 @@ static void recover_prep(void *arg)
{
struct mddev *mddev = arg;
struct md_cluster_info *cinfo = mddev->cluster_info;
+
set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
@@ -432,6 +433,7 @@ static void ack_bast(void *arg, int mode)
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
cinfo->suspend_hi = 0;
@@ -527,6 +529,7 @@ static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg
{
int got_lock = 0;
struct md_cluster_info *cinfo = mddev->cluster_info;
+
mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
@@ -1099,6 +1102,7 @@ static int metadata_update_finish(struct mddev *mddev)
static void metadata_update_cancel(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
unlock_comm(cinfo);
}
@@ -1317,6 +1321,7 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
static int resync_start(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
}
@@ -1448,6 +1453,7 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
static void add_new_disk_cancel(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+
clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
unlock_comm(cinfo);
}
@@ -1471,6 +1477,7 @@ static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct cluster_msg cmsg = {0};
struct md_cluster_info *cinfo = mddev->cluster_info;
+
cmsg.type = cpu_to_le32(REMOVE);
cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
return sendmsg(cinfo, &cmsg, 1);
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index 7ac286e28fcb..d6dbca5edab8 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -101,6 +101,7 @@ static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end,
{
/* If we find a ReadFixable sector, we fix it ... */
int i;
+
for (i = 0; i < conf->nfaults; i++)
if (conf->faults[i] >= start &&
conf->faults[i] < end) {
@@ -125,6 +126,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
int i;
int n = conf->nfaults;
+
for (i = 0; i < conf->nfaults; i++) {
if (conf->faults[i] == start) {
switch (mode) {
@@ -273,6 +275,7 @@ static int faulty_reshape(struct mddev *mddev)
conf->nfaults = 0;
else if (mode == ClearErrors) {
int i;
+
for (i = 0; i < Modes ; i++) {
conf->period[i] = 0;
atomic_set(&conf->counters[i], 0);
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index dd180199479b..a26ed5a3643b 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -35,6 +35,7 @@ static int multipath_map (struct mpconf *conf)
rcu_read_lock();
for (i = 0; i < disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
+
if (rdev && test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
@@ -141,6 +142,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
+
seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
@@ -169,6 +171,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
*/
if (test_and_clear_bit(In_sync, &rdev->flags)) {
unsigned long flags;
+
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
spin_unlock_irqrestore(&conf->device_lock, flags);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a48fbc80fc64..918565c03279 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -549,6 +549,7 @@ static void submit_flushes(struct work_struct *ws)
* we reclaim rcu_read_lock
*/
struct bio *bi;
+
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -602,6 +603,7 @@ static void md_submit_flush_data(struct work_struct *ws)
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
ktime_t req_start = ktime_get_boottime();
+
spin_lock_irq(&mddev->lock);
/* flush requests wait until ongoing flush completes,
* hence coalescing all the pending requests.
@@ -794,6 +796,7 @@ void mddev_unlock(struct mddev *mddev)
* is seen.
*/
const struct attribute_group *to_remove = mddev->to_remove;
+
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
@@ -867,6 +870,7 @@ EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
static struct md_personality *find_pers(int level, char *clevel)
{
struct md_personality *pers;
+
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
@@ -1233,6 +1237,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
+
if (!md_uuid_equal(refsb, sb)) {
pr_warn("md: %pg has different UUID to %pg\n",
rdev->bdev, refdev->bdev);
@@ -1521,6 +1526,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
/* now set the "removed" and "faulty" bits on any missing devices */
for (i = 0; i < mddev->raid_disks; i++) {
mdp_disk_t *d = &sb->disks[i];
+
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
@@ -1702,6 +1708,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
__le64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
+
if (sectors > (PAGE_SIZE / 512))
return -EINVAL;
offset = le32_to_cpu(sb->bblog_offset);
@@ -1717,6 +1724,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
u64 bb = le64_to_cpu(*bbp);
int count = bb & (0x3ff);
u64 sector = bb >> 10;
+
sector <<= sb->bblog_shift;
count <<= sb->bblog_shift;
if (bb + 1 == 0)
@@ -1901,6 +1909,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
+
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = MD_DISK_ROLE_SPARE;
@@ -2051,6 +2060,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
struct badblocks *bb = &rdev->badblocks;
__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
+
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
@@ -2083,6 +2093,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
+
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
@@ -2147,6 +2158,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
+
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->data_offset != rdev->new_data_offset)
@@ -2198,6 +2210,7 @@ super_1_allow_new_offset(struct md_rdev *rdev,
{
/* All necessary checks on new >= old have been done */
struct bitmap *bitmap;
+
if (new_offset >= rdev->data_offset)
return 1;
@@ -2409,6 +2422,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
rcu_read_lock();
if (rdev->desc_nr < 0) {
int choice = 0;
+
if (mddev->pers)
choice = mddev->raid_disks;
while (md_find_rdev_nr_rcu(mddev, choice))
@@ -2465,6 +2479,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
static void rdev_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
+
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
@@ -2559,6 +2574,7 @@ static void sync_sbs(struct mddev *mddev, int nospares)
* with the rest of the array)
*/
struct md_rdev *rdev;
+
rdev_for_each(rdev, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
@@ -3216,6 +3232,7 @@ static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long offset;
+
if (kstrtoull(buf, 10, &offset) < 0)
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
@@ -3596,6 +3613,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
static void rdev_free(struct kobject *ko)
{
struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
+
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
@@ -3791,11 +3809,13 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
+
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
+
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
@@ -3817,6 +3837,7 @@ static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
+
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
@@ -3853,6 +3874,7 @@ level_show(struct mddev *mddev, char *page)
{
struct md_personality *p;
int ret;
+
spin_lock(&mddev->lock);
p = mddev->pers;
if (p)
@@ -4320,6 +4342,7 @@ static char *array_states[] = {
static int match_word(const char *word, char **list)
{
int n;
+
for (n = 0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
@@ -4718,6 +4741,7 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
+
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
@@ -4759,6 +4783,7 @@ action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
unsigned long recovery = mddev->recovery;
+
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
@@ -4824,6 +4849,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
+
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev_lock(mddev);
@@ -4981,6 +5007,7 @@ static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
+
if (mddev->curr_resync == MD_RESYNC_NONE)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
@@ -5067,6 +5094,7 @@ static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
int err;
+
spin_lock(&mddev->lock);
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
@@ -5634,6 +5662,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
/* Need to ensure that 'name' is not a duplicate.
*/
struct mddev *mddev2;
+
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
@@ -6247,6 +6276,7 @@ static void mddev_detach(struct mddev *mddev)
static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
+
md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
@@ -6409,6 +6439,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->bitmap_info.file) {
struct file *f = mddev->bitmap_info.file;
+
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
spin_unlock(&mddev->lock);
@@ -6472,6 +6503,7 @@ static void autorun_devices(int part)
int unit;
dev_t dev;
LIST_HEAD(candidates);
+
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
@@ -6734,6 +6766,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
*/
if (mddev->pers) {
int err;
+
if (!mddev->pers->hot_add_disk) {
pr_warn("%s: personality does not support diskops!\n",
mdname(mddev));
@@ -6851,6 +6884,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
+
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
pr_warn("md: error, md_import_device() returned %ld\n",
@@ -7083,6 +7117,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
}
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
+
if (f) {
spin_lock(&mddev->lock);
mddev->bitmap_info.file = NULL;
@@ -7656,6 +7691,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
*/
if (mddev->pers) {
mdu_disk_info_t info;
+
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else if (!(info.state & (1<<MD_DISK_SYNC)))
@@ -7697,6 +7733,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
+
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
@@ -8091,6 +8128,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
per_milli = res;
{
int i, x = per_milli/50, y = 20-x;
+
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
@@ -8235,7 +8273,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (v == (void *)1) {
struct md_personality *pers;
- seq_printf(seq, "Personalities : ");
+
+ seq_puts(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
@@ -8396,6 +8435,7 @@ int register_md_cluster_operations(struct md_cluster_operations *ops,
struct module *module)
{
int ret = 0;
+
spin_lock(&pers_lock);
if (md_cluster_ops != NULL)
ret = -EALREADY;
@@ -8420,6 +8460,7 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
int ret;
+
if (!md_cluster_ops)
request_module("md-cluster");
spin_lock(&pers_lock);
@@ -8455,6 +8496,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
+
curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
@@ -8775,6 +8817,7 @@ void md_do_sync(struct md_thread *thread)
do {
int mddev2_minor = -1;
+
mddev->curr_resync = MD_RESYNC_DELAYED;
try_again:
@@ -8790,6 +8833,7 @@ void md_do_sync(struct md_thread *thread)
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
+
if (mddev < mddev2 &&
mddev->curr_resync == MD_RESYNC_DELAYED) {
/* arbitrarily yield */
@@ -9316,6 +9360,7 @@ void md_check_recovery(struct mddev *mddev)
if (!md_is_rdwr(mddev)) {
struct md_rdev *rdev;
+
if (!mddev->external && mddev->in_sync)
/* 'Blocked' flag not needed as failed devices
* will be recorded if array switched to read/write.
@@ -9545,6 +9590,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
{
struct mddev *mddev = rdev->mddev;
int rv;
+
if (is_new)
s += rdev->new_data_offset;
else
@@ -9568,6 +9614,7 @@ int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
int rv;
+
if (is_new)
s += rdev->new_data_offset;
else
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 11b9815f153d..73da2534da88 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -39,6 +39,7 @@ static void dump_zones(struct mddev *mddev)
sector_t zone_start = 0;
struct r0conf *conf = mddev->private;
int raid_disks = conf->strip_zone[0].nb_dev;
+
pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
mdname(mddev),
conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 809a46dbbaef..be86333104fe 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -244,6 +244,7 @@ static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio **bio = r1_bio->bios + i;
+
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
@@ -266,6 +267,7 @@ static void put_buf(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio *bio = r1_bio->bios[i];
+
if (bio->bi_end_io)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
}
@@ -387,6 +389,7 @@ static void raid1_end_read_request(struct bio *bio)
* Here we redefine "uptodate" to mean "Don't want to retry"
*/
unsigned long flags;
+
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
@@ -541,6 +544,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can return now */
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
struct bio *mbio = r1_bio->master_bio;
+
pr_debug("raid1: behind end write sectors"
" %llu-%llu\n",
(unsigned long long) mbio->bi_iter.bi_sector,
@@ -689,6 +693,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else {
sector_t good_sectors = first_bad - this_sector;
+
if (good_sectors > best_good_sectors) {
best_good_sectors = good_sectors;
best_disk = disk;
@@ -800,6 +805,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void *)bio->bi_bdev;
+
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
@@ -1237,6 +1243,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (r1bio_existed) {
/* Need to get the block device name carefully */
struct md_rdev *rdev;
+
rcu_read_lock();
rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
if (rdev)
@@ -1351,8 +1358,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
-
DEFINE_WAIT(w);
+
if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
return;
@@ -1460,6 +1467,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
}
if (is_bad) {
int good_sectors = first_bad - r1_bio->sector;
+
if (good_sectors < max_sectors)
max_sectors = good_sectors;
}
@@ -1517,6 +1525,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (i = 0; i < disks; i++) {
struct bio *mbio = NULL;
struct md_rdev *rdev = conf->mirrors[i].rdev;
+
if (!r1_bio->bios[i])
continue;
@@ -1632,6 +1641,7 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
seq_printf(seq, "%s",
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
@@ -1704,6 +1714,7 @@ static void print_conf(struct r1conf *conf)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
if (rdev)
pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
@@ -1743,6 +1754,7 @@ static int raid1_spare_active(struct mddev *mddev)
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+
if (repl
&& !test_bit(Candidate, &repl->flags)
&& repl->recovery_offset == MaxSector
@@ -2157,6 +2169,7 @@ static void process_checks(struct r1bio *r1_bio)
blk_status_t status;
struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b);
+
if (b->bi_end_io != end_sync_read)
continue;
/* fixup the bio for reuse, but preserve errno */
@@ -2279,6 +2292,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
sector_t sect, int sectors)
{
struct mddev *mddev = conf->mddev;
+
while (sectors) {
int s = sectors;
int d = read_disk;
@@ -2319,6 +2333,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (!success) {
/* Cannot read from anywhere - mark it bad */
struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
+
if (!rdev_set_badblocks(rdev, sect, s, 0))
md_error(mddev, rdev);
break;
@@ -2405,6 +2420,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
while (sect_to_write) {
struct bio *wbio;
+
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors'*/
@@ -2443,9 +2459,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
{
int m;
int s = r1_bio->sectors;
+
for (m = 0; m < conf->raid_disks * 2 ; m++) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
struct bio *bio = r1_bio->bios[m];
+
if (bio->bi_end_io == NULL)
continue;
if (!bio->bi_status &&
@@ -2470,6 +2488,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
+
rdev_clear_badblocks(rdev,
r1_bio->sector,
r1_bio->sectors, 0);
@@ -2565,6 +2584,7 @@ static void raid1d(struct md_thread *thread)
if (!list_empty_careful(&conf->bio_end_io_list) &&
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
+
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
list_splice_init(&conf->bio_end_io_list, &tmp);
@@ -2761,6 +2781,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
+
bio = r1_bio->bios[i];
rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -2831,9 +2852,11 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* need to mark them bad on all write targets
*/
int ok = 1;
+
for (i = 0 ; i < conf->raid_disks * 2 ; i++)
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
+
ok = rdev_set_badblocks(rdev, sector_nr,
min_bad, 0
) && ok;
@@ -2870,6 +2893,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* drives must be failed - so we are finished
*/
sector_t rv;
+
if (min_bad > 0)
max_sector = sector_nr + min_bad;
rv = max_sector - sector_nr;
@@ -2887,6 +2911,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
do {
struct page *page;
int len = PAGE_SIZE;
+
if (sector_nr + (len>>9) > max_sector)
len = (max_sector - sector_nr) << 9;
if (len == 0)
@@ -3028,6 +3053,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->device_lock);
rdev_for_each(rdev, mddev) {
int disk_idx = rdev->raid_disk;
+
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
@@ -3221,11 +3247,13 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
+
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
if (mddev->bitmap) {
int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+
if (ret)
return ret;
}
@@ -3318,6 +3346,7 @@ static int raid1_reshape(struct mddev *mddev)
for (d = d2 = 0; d < conf->raid_disks; d++) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
+
if (rdev && rdev->raid_disk != d2) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = d2;
@@ -3367,6 +3396,7 @@ static void *raid1_takeover(struct mddev *mddev)
*/
if (mddev->level == 5 && mddev->raid_disks == 2) {
struct r1conf *conf;
+
mddev->new_level = 1;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8f3339e73f55..b9dbe22818bf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -262,6 +262,7 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
for (i = 0; i < conf->geo.raid_disks; i++) {
struct bio **bio = &r10_bio->devs[i].bio;
+
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
@@ -619,6 +620,7 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
int d = dev;
int set;
sector_t s = sector;
+
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
slot++;
@@ -689,6 +691,7 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
offset = sector & geo->chunk_mask;
if (geo->far_offset) {
int fc;
+
chunk = sector >> geo->chunk_shift;
fc = sector_div(chunk, geo->far_copies);
dev -= fc * geo->near_copies;
@@ -910,6 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void *)bio->bi_bdev;
+
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
@@ -1125,6 +1129,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void *)bio->bi_bdev;
+
bio->bi_next = NULL;
bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
@@ -1516,6 +1521,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
}
if (is_bad) {
int good_sectors = first_bad - dev_sector;
+
if (good_sectors < max_sectors)
max_sectors = good_sectors;
}
@@ -1845,6 +1851,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
*/
if (r10_bio->devs[disk].bio) {
struct md_rdev *rdev = conf->mirrors[disk].rdev;
+
mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
&mddev->bio_set);
mbio->bi_end_io = raid10_end_discard_request;
@@ -1859,6 +1866,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
}
if (r10_bio->devs[disk].repl_bio) {
struct md_rdev *rrdev = conf->mirrors[disk].replacement;
+
rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
&mddev->bio_set);
rbio->bi_end_io = raid10_end_discard_request;
@@ -1951,6 +1959,7 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev)
rcu_read_lock();
for (i = 0; i < conf->geo.raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
@@ -1967,6 +1976,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
int first = 0;
int has_enough = 0;
int disks, ncopies;
+
if (previous) {
disks = conf->prev.raid_disks;
ncopies = conf->prev.near_copies;
@@ -1980,8 +1990,10 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
int n = conf->copies;
int cnt = 0;
int this = first;
+
while (n--) {
struct md_rdev *rdev;
+
if (this != ignore) {
rdev = rcu_dereference(conf->mirrors[this].rdev);
if (rdev && test_bit(In_sync, &rdev->flags))
@@ -2167,6 +2179,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
mirror = first;
for ( ; mirror <= last ; mirror++) {
struct raid10_info *p = &conf->mirrors[mirror];
+
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
if (p->rdev) {
@@ -2318,6 +2331,7 @@ static void end_sync_request(struct r10bio *r10_bio)
if (r10_bio->master_bio == NULL) {
/* the primary of several recovery bios */
sector_t s = r10_bio->sectors;
+
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
test_bit(R10BIO_WriteError, &r10_bio->state))
reschedule_retry(r10_bio);
@@ -2327,6 +2341,7 @@ static void end_sync_request(struct r10bio *r10_bio)
break;
} else {
struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
+
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
test_bit(R10BIO_WriteError, &r10_bio->state))
reschedule_retry(r10_bio);
@@ -2439,8 +2454,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* All vec entries are PAGE_SIZE;
*/
int sectors = r10_bio->sectors;
+
for (j = 0; j < vcnt; j++) {
int len = PAGE_SIZE;
+
if (sectors < (len / 512))
len = sectors * 512;
if (memcmp(page_address(fpages[j]),
@@ -2584,6 +2601,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
if (rdev != conf->mirrors[dw].rdev) {
/* need bad block on destination too */
struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
+
addr = r10_bio->devs[1].addr + sect;
ok = rdev_set_badblocks(rdev2, addr, s, 0);
if (!ok) {
@@ -2789,6 +2807,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
* reads.
*/
int dn = r10_bio->devs[r10_bio->read_slot].devnum;
+
rdev = conf->mirrors[dn].rdev;
if (!rdev_set_badblocks(
@@ -2925,6 +2944,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
while (sect_to_write) {
struct bio *wbio;
sector_t wsector;
+
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
@@ -2999,6 +3019,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
test_bit(R10BIO_IsRecover, &r10_bio->state)) {
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
+
rdev = conf->mirrors[dev].rdev;
if (r10_bio->devs[m].bio == NULL ||
r10_bio->devs[m].bio->bi_end_io == NULL)
@@ -3036,9 +3057,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
put_buf(r10_bio);
} else {
bool fail = false;
+
for (m = 0; m < conf->copies; m++) {
int dev = r10_bio->devs[m].devnum;
struct bio *bio = r10_bio->devs[m].bio;
+
rdev = conf->mirrors[dev].rdev;
if (bio == IO_MADE_GOOD) {
rdev_clear_badblocks(
@@ -3099,6 +3122,7 @@ static void raid10d(struct md_thread *thread)
if (!list_empty_careful(&conf->bio_end_io_list) &&
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
LIST_HEAD(tmp);
+
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
while (!list_empty(&conf->bio_end_io_list)) {
@@ -3416,6 +3440,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* recovery... the complicated one */
int j;
+
r10_bio = NULL;
for (i = 0 ; i < conf->geo.raid_disks; i++) {
@@ -3521,6 +3546,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rcu_dereference(conf->mirrors[d].rdev);
sector_t sector, first_bad;
int bad_sectors;
+
if (!rdev ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -3606,6 +3632,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* on other device(s)
*/
int k;
+
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
@@ -3649,8 +3676,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* readable copy.
*/
int targets = 1;
+
for (; j < conf->copies; j++) {
int d = r10_bio->devs[j].devnum;
+
if (conf->mirrors[d].rdev &&
test_bit(In_sync,
&conf->mirrors[d].rdev->flags))
@@ -3664,6 +3693,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (biolist == NULL) {
while (r10_bio) {
struct r10bio *rb2 = r10_bio;
+
r10_bio = (struct r10bio *) rb2->master_bio;
rb2->master_bio = NULL;
put_buf(rb2);
@@ -3778,6 +3808,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (count < 2) {
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
+
if (r10_bio->devs[i].bio->bi_end_io)
rdev_dec_pending(conf->mirrors[d].rdev,
mddev);
@@ -3799,12 +3830,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
do {
struct page *page;
int len = PAGE_SIZE;
+
if (sector_nr + (len>>9) > max_sector)
len = (max_sector - sector_nr) << 9;
if (len == 0)
break;
for (bio = biolist; bio; bio = bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
+
page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big enough
@@ -3954,6 +3987,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
{
int nc, fc, fo;
int layout, chunk, disks;
+
switch (new) {
case geo_old:
layout = mddev->layout;
@@ -4347,6 +4381,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return -EINVAL;
if (mddev->bitmap) {
int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
+
if (ret)
return ret;
}
@@ -4493,6 +4528,7 @@ static int calc_degraded(struct r10conf *conf)
/* 'prev' section first */
for (i = 0; i < conf->prev.raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded++;
else if (!test_bit(In_sync, &rdev->flags))
@@ -4509,6 +4545,7 @@ static int calc_degraded(struct r10conf *conf)
degraded2 = 0;
for (i = 0; i < conf->geo.raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded2++;
else if (!test_bit(In_sync, &rdev->flags)) {
@@ -4597,6 +4634,7 @@ static int raid10_start_reshape(struct mddev *mddev)
smp_mb();
if (mddev->reshape_backwards) {
sector_t size = raid10_size(mddev, 0, 0);
+
if (size < mddev->array_sectors) {
spin_unlock_irq(&conf->device_lock);
pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
@@ -4958,6 +4996,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev2;
+
if (s&1) {
rdev2 = rcu_dereference(conf->mirrors[d].replacement);
b = r10_bio->devs[s/2].repl_bio;
@@ -4984,6 +5023,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
struct page *page = pages[s / (PAGE_SIZE >> 9)];
int len = (max_sectors - s) << 9;
+
if (len > PAGE_SIZE)
len = PAGE_SIZE;
for (bio = blist; bio ; bio = bio->bi_next) {
@@ -5049,6 +5089,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev;
+
rcu_read_lock();
if (s&1) {
rdev = rcu_dereference(conf->mirrors[d].replacement);
@@ -5138,6 +5179,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
int d = r10b->devs[slot].devnum;
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
sector_t addr;
+
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
@@ -5228,11 +5270,13 @@ static void raid10_finish_reshape(struct mddev *mddev)
mddev->resync_max_sectors = mddev->array_sectors;
} else {
int d;
+
rcu_read_lock();
for (d = conf->geo.raid_disks ;
d < conf->geo.raid_disks - mddev->delta_disks;
d++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+
if (rdev)
clear_bit(In_sync, &rdev->flags);
rdev = rcu_dereference(conf->mirrors[d].replacement);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 46182b955aef..123cc38d4a02 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2148,6 +2148,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
}
if (!sh) {
int new_size = conf->min_nr_stripes * 2;
+
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
mdname(mddev),
new_size);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index e495939bb3e0..3ba595ec6ad8 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -182,6 +182,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
/* rcw: xor data from all not updated disks */
for (i = disks; i--;) {
struct r5dev *dev = &sh->dev[i];
+
if (test_bit(R5_UPTODATE, &dev->flags))
srcs[count++] = dev->page;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4bdfbe1f8fcf..fb18598e81d3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -73,6 +73,7 @@ static struct workqueue_struct *raid5_wq;
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
+
return &conf->stripe_hashtbl[hash];
}
@@ -99,6 +100,7 @@ static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
__acquires(&conf->device_lock)
{
int i;
+
spin_lock_irq(conf->hash_locks);
for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
@@ -109,6 +111,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
__releases(&conf->device_lock)
{
int i;
+
spin_unlock(&conf->device_lock);
for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
spin_unlock(conf->hash_locks + i);
@@ -185,6 +188,7 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
if (list_empty(&sh->lru)) {
struct r5worker_group *group;
+
group = conf->worker_groups + cpu_to_group(cpu);
if (stripe_is_lowprio(sh))
list_add_tail(&sh->lru, &group->loprio_list);
@@ -697,6 +701,7 @@ int raid5_calc_degraded(struct r5conf *conf)
degraded = 0;
for (i = 0; i < conf->previous_raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = rcu_dereference(conf->disks[i].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -723,6 +728,7 @@ int raid5_calc_degraded(struct r5conf *conf)
degraded2 = 0;
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = rcu_dereference(conf->disks[i].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -986,6 +992,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf,
if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
int seq = sh->bm_seq;
+
if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
sh->batch_head->bm_seq > seq)
seq = sh->batch_head->bm_seq;
@@ -1490,8 +1497,10 @@ static void ops_run_biofill(struct stripe_head *sh)
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (test_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi;
+
spin_lock_irq(&sh->stripe_lock);
dev->read = rbi = dev->toread;
dev->toread = NULL;
@@ -2088,6 +2097,7 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (head_sh->dev[i].written ||
test_bit(R5_InJournal, &head_sh->dev[i].flags)) {
off_srcs[count] = dev->offset;
@@ -2099,6 +2109,7 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
off_dest = sh->dev[pd_idx].offset;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (i != pd_idx) {
off_srcs[count] = dev->offset;
xor_srcs[count++] = dev->page;
@@ -2345,6 +2356,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (overlap_clear && !sh->batch_head) {
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -2679,6 +2691,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
for (i = conf->raid_disks; i < newsize; i++)
if (nsh->dev[i].page == NULL) {
struct page *p = alloc_page(GFP_NOIO);
+
nsh->dev[i].page = p;
nsh->dev[i].orig_page = p;
nsh->dev[i].offset = 0;
@@ -2957,6 +2970,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
unsigned long flags;
+
pr_debug("raid456: error called\n");
pr_crit("md/raid:%s: Disk failure on %pg, disabling device.\n",
@@ -3428,6 +3442,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (i == pd_idx || i == qd_idx)
continue;
@@ -3570,6 +3585,7 @@ static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi,
if (forwrite) {
/* check if page is covered */
sector_t sector = sh->dev[dd_idx].sector;
+
for (bi = sh->dev[dd_idx].towrite;
sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
bi && bi->bi_iter.bi_sector <= sector;
@@ -3656,13 +3672,16 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
int i;
+
BUG_ON(sh->batch_head);
+
for (i = disks; i--; ) {
struct bio *bi;
int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
struct md_rdev *rdev;
+
rcu_read_lock();
rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && test_bit(In_sync, &rdev->flags) &&
@@ -3792,6 +3811,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
@@ -3989,6 +4009,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
* do it if failed >= 2
*/
int other;
+
for (other = disks; other--; ) {
if (other == disk_idx)
continue;
@@ -4083,6 +4104,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
test_bit(R5_SkipCopy, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
+
pr_debug("Return write for disc %d\n", i);
if (test_and_clear_bit(R5_Discard, &dev->flags))
clear_bit(R5_UPTODATE, &dev->flags);
@@ -4126,6 +4148,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
int hash;
+
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
@@ -4209,6 +4232,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
+
if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
@@ -4246,6 +4270,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (test_bit(R5_InJournal, &dev->flags) &&
dev->page == dev->orig_page &&
!test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
@@ -4276,6 +4301,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
i == sh->pd_idx || i == sh->qd_idx ||
test_bit(R5_InJournal, &dev->flags)) &&
@@ -4298,9 +4324,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
int qread = 0;
+
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (!test_bit(R5_OVERWRITE, &dev->flags) &&
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
@@ -4620,11 +4648,11 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
{
int i;
-
/* We have read all the blocks in this stripe and now we need to
* copy some of them into a target stripe for expand.
*/
struct dma_async_tx_descriptor *tx = NULL;
+
BUG_ON(sh->batch_head);
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
@@ -4892,6 +4920,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
static int clear_batch_ready(struct stripe_head *sh)
{
struct stripe_head *tmp;
+
if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
return (sh->batch_head && sh->batch_head != sh);
spin_lock(&sh->stripe_lock);
@@ -5107,6 +5136,7 @@ static void handle_stripe(struct stripe_head *sh)
!test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
+
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
dev->written || test_bit(R5_InJournal,
@@ -5256,6 +5286,7 @@ static void handle_stripe(struct stripe_head *sh)
if (s.failed <= conf->max_degraded && !conf->mddev->ro)
for (i = 0; i < s.failed; i++) {
struct r5dev *dev = &sh->dev[s.failed_num[i]];
+
if (test_bit(R5_ReadError, &dev->flags)
&& !test_bit(R5_LOCKED, &dev->flags)
&& test_bit(R5_UPTODATE, &dev->flags)
@@ -5337,6 +5368,7 @@ static void handle_stripe(struct stripe_head *sh)
for (i = disks; i--; ) {
struct md_rdev *rdev;
struct r5dev *dev = &sh->dev[i];
+
if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
/* We own a safe reference to the rdev */
rdev = rdev_pend_deref(conf->disks[i].rdev);
@@ -5387,8 +5419,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
while (!list_empty(&conf->delayed_list)) {
struct list_head *l = conf->delayed_list.next;
- struct stripe_head *sh;
- sh = list_entry(l, struct stripe_head, lru);
+ struct stripe_head *sh = list_entry(l, struct stripe_head, lru);
+
list_del_init(l);
clear_bit(STRIPE_DELAYED, &sh->state);
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -5404,11 +5436,13 @@ static void activate_bit_delay(struct r5conf *conf,
__must_hold(&conf->device_lock)
{
struct list_head head;
+
list_add(&head, &conf->bitmap_list);
list_del_init(&conf->bitmap_list);
while (!list_empty(&head)) {
struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
int hash;
+
list_del_init(&sh->lru);
atomic_inc(&sh->count);
hash = sh->hash_lock_index;
@@ -5603,6 +5637,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
+
split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
bio_chain(split, raid_bio);
submit_bio_noacct(raid_bio);
@@ -5648,6 +5683,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
wg = &conf->worker_groups[group];
} else {
int i;
+
for (i = 0; i < conf->group_cnt; i++) {
handle_list = try_loprio ? &conf->worker_groups[i].loprio_list :
&conf->worker_groups[i].handle_list;
@@ -5780,6 +5816,7 @@ static void release_stripe_plug(struct mddev *mddev,
if (cb->list.next == NULL) {
int i;
+
INIT_LIST_HEAD(&cb->list);
for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
INIT_LIST_HEAD(cb->temp_inactive_list + i);
@@ -6367,6 +6404,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
int j;
int skipped_disk = 0;
+
sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPANDING, &sh->state);
@@ -6376,6 +6414,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
*/
for (j = sh->disks; j--; ) {
sector_t s;
+
if (j == sh->pd_idx)
continue;
if (conf->level == 6 &&
@@ -6771,6 +6810,7 @@ static void raid5d(struct md_thread *thread)
while ((bio = remove_bio_from_retry(conf, &offset))) {
int ok;
+
spin_unlock_irq(&conf->device_lock);
ok = retry_aligned_read(conf, bio, offset);
spin_lock_irq(&conf->device_lock);
@@ -6830,6 +6870,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
struct r5conf *conf;
int ret = 0;
+
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
@@ -6902,6 +6943,7 @@ static ssize_t
raid5_show_rmw_level(struct mddev *mddev, char *page)
{
struct r5conf *conf = mddev->private;
+
if (conf)
return sprintf(page, "%d\n", conf->rmw_level);
else
@@ -7040,6 +7082,7 @@ raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
struct r5conf *conf;
int ret = 0;
+
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
@@ -7085,6 +7128,7 @@ raid5_show_skip_copy(struct mddev *mddev, char *page)
{
struct r5conf *conf;
int ret = 0;
+
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
@@ -7136,6 +7180,7 @@ static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
struct r5conf *conf = mddev->private;
+
if (conf)
return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
else
@@ -7150,6 +7195,7 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
{
struct r5conf *conf;
int ret = 0;
+
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
@@ -7266,6 +7312,7 @@ static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
for (j = 0; j < cnt; j++) {
struct r5worker *worker = group->workers + j;
+
worker->group = group;
INIT_WORK(&worker->work, raid5_do_work);
@@ -8108,6 +8155,7 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+
seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
@@ -8379,6 +8427,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
return -EINVAL;
if (mddev->bitmap) {
int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
+
if (ret)
return ret;
}
@@ -8404,6 +8453,7 @@ static int check_stripe_cache(struct mddev *mddev)
* stripe_heads first.
*/
struct r5conf *conf = mddev->private;
+
if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
> conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
@@ -8436,6 +8486,7 @@ static int check_reshape(struct mddev *mddev)
* Otherwise 2 is the minimum
*/
int min = 2;
+
if (mddev->level == 6)
min = 4;
if (mddev->raid_disks + mddev->delta_disks < min)
@@ -8639,6 +8690,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
if (mddev->delta_disks <= 0) {
int d;
+
spin_lock_irq(&conf->device_lock);
mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irq(&conf->device_lock);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index c629bfae826f..9e7088e03852 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -180,6 +180,7 @@ static inline void cpu_relax(void)
static inline uint32_t raid6_jiffies(void)
{
struct timeval tv;
+
gettimeofday(&tv, NULL);
return tv.tv_sec*1000 + tv.tv_usec/1000;
}
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread

* [PATCH 13/34] md: space prohibited between function and opening parenthesis [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (11 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 12/34] md: add missing blank line after declaration [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 14/34] md: prefer seq_put[cs]() to seq_printf() [WARNING] heinzm
` (21 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-linear.c | 12 ++++++------
drivers/md/md-multipath.c | 26 +++++++++++++-------------
drivers/md/md.c | 2 +-
drivers/md/raid0.c | 8 ++++----
drivers/md/raid10.c | 4 +---
drivers/md/raid5.c | 16 +++++++---------
6 files changed, 32 insertions(+), 36 deletions(-)
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 35ee116bf45b..35383589ec77 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -132,7 +132,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
return NULL;
}
-static int linear_run (struct mddev *mddev)
+static int linear_run(struct mddev *mddev)
{
struct linear_conf *conf;
int ret;
@@ -265,7 +265,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
return true;
}
-static void linear_status (struct seq_file *seq, struct mddev *mddev)
+static void linear_status(struct seq_file *seq, struct mddev *mddev)
{
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
@@ -287,14 +287,14 @@ static struct md_personality linear_personality = {
.quiesce = linear_quiesce,
};
-static int __init linear_init (void)
+static int __init linear_init(void)
{
- return register_md_personality (&linear_personality);
+ return register_md_personality(&linear_personality);
}
-static void linear_exit (void)
+static void linear_exit(void)
{
- unregister_md_personality (&linear_personality);
+ unregister_md_personality(&linear_personality);
}
module_init(linear_init);
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index a26ed5a3643b..6cc169abef00 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -23,7 +23,7 @@
#define NR_RESERVED_BUFS 32
-static int multipath_map (struct mpconf *conf)
+static int multipath_map(struct mpconf *conf)
{
int i, disks = conf->raid_disks;
@@ -49,7 +49,7 @@ static int multipath_map (struct mpconf *conf)
return (-1);
}
-static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
+static void multipath_reschedule_retry(struct multipath_bh *mp_bh)
{
unsigned long flags;
struct mddev *mddev = mp_bh->mddev;
@@ -88,7 +88,7 @@ static void multipath_end_request(struct bio *bio)
/*
* oops, IO error:
*/
- md_error (mp_bh->mddev, rdev);
+ md_error(mp_bh->mddev, rdev);
pr_info("multipath: %pg: rescheduling sector %llu\n",
rdev->bdev,
(unsigned long long)bio->bi_iter.bi_sector);
@@ -137,13 +137,13 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
struct mpconf *conf = mddev->private;
int i;
- seq_printf (seq, " [%d/%d] [", conf->raid_disks,
- conf->raid_disks - mddev->degraded);
+ seq_printf(seq, " [%d/%d] [", conf->raid_disks,
+ conf->raid_disks - mddev->degraded);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
seq_putc(seq, ']');
@@ -152,7 +152,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
/*
* Careful, this can execute in IRQ contexts as well!
*/
-static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
+static void multipath_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
@@ -184,7 +184,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
conf->raid_disks - mddev->degraded);
}
-static void print_multipath_conf (struct mpconf *conf)
+static void print_multipath_conf(struct mpconf *conf)
{
int i;
struct multipath_info *tmp;
@@ -338,7 +338,7 @@ static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_d
return mddev->dev_sectors;
}
-static int multipath_run (struct mddev *mddev)
+static int multipath_run(struct mddev *mddev)
{
struct mpconf *conf;
int disk_idx;
@@ -455,14 +455,14 @@ static struct md_personality multipath_personality = {
.size = multipath_size,
};
-static int __init multipath_init (void)
+static int __init multipath_init(void)
{
- return register_md_personality (&multipath_personality);
+ return register_md_personality(&multipath_personality);
}
-static void __exit multipath_exit (void)
+static void __exit multipath_exit(void)
{
- unregister_md_personality (&multipath_personality);
+ unregister_md_personality(&multipath_personality);
}
module_init(multipath_init);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 918565c03279..e50a1bcf0a1c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5638,7 +5638,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
int partitioned;
int shift;
int unit;
- int error ;
+ int error;
/*
* Wait for any previous instance of this device to be completely
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 73da2534da88..0fb47f4d78bf 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -768,14 +768,14 @@ static struct md_personality raid0_personality = {
.quiesce = raid0_quiesce,
};
-static int __init raid0_init (void)
+static int __init raid0_init(void)
{
- return register_md_personality (&raid0_personality);
+ return register_md_personality(&raid0_personality);
}
-static void raid0_exit (void)
+static void raid0_exit(void)
{
- unregister_md_personality (&raid0_personality);
+ unregister_md_personality(&raid0_personality);
}
module_init(raid0_init);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b9dbe22818bf..c8f909e8a25e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -5272,9 +5272,7 @@ static void raid10_finish_reshape(struct mddev *mddev)
int d;
rcu_read_lock();
- for (d = conf->geo.raid_disks ;
- d < conf->geo.raid_disks - mddev->delta_disks;
- d++) {
+ for (d = conf->geo.raid_disks; d < conf->geo.raid_disks - mddev->delta_disks; d++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fb18598e81d3..f5167eb71b5f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -157,7 +157,7 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
return slot;
}
-static void print_raid5_conf (struct r5conf *conf);
+static void print_raid5_conf(struct r5conf *conf);
static int stripe_operations_active(struct stripe_head *sh)
{
@@ -8151,18 +8151,18 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
conf->chunk_sectors / 2, mddev->layout);
- seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
+ seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
- seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
- seq_printf (seq, "]");
+ seq_printf(seq, "]");
}
-static void print_raid5_conf (struct r5conf *conf)
+static void print_raid5_conf(struct r5conf *conf)
{
struct md_rdev *rdev;
int i;
@@ -8694,9 +8694,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
spin_lock_irq(&conf->device_lock);
mddev->degraded = raid5_calc_degraded(conf);
spin_unlock_irq(&conf->device_lock);
- for (d = conf->raid_disks ;
- d < conf->raid_disks - mddev->delta_disks;
- d++) {
+ for (d = conf->raid_disks; d < conf->raid_disks - mddev->delta_disks; d++) {
rdev = rdev_mdlock_deref(mddev,
conf->disks[d].rdev);
if (rdev)
@@ -8872,7 +8870,7 @@ static int raid5_check_reshape(struct mddev *mddev)
mddev->layout = mddev->new_layout;
}
if (new_chunk > 0) {
- conf->chunk_sectors = new_chunk ;
+ conf->chunk_sectors = new_chunk;
mddev->chunk_sectors = new_chunk;
}
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread

* [PATCH 14/34] md: prefer seq_put[cs]() to seq_printf() [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (12 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 13/34] md: space prohibited between function and opening parenthesis [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 15/34] md: avoid multiple line dereference [WARNING] heinzm
` (20 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 4 ++--
drivers/md/md-faulty.c | 2 +-
drivers/md/md.c | 52 +++++++++++++++++++++---------------------
drivers/md/raid1.c | 2 +-
drivers/md/raid10.c | 2 +-
drivers/md/raid5.c | 2 +-
6 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 3cee70340024..2db748c998e1 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2055,11 +2055,11 @@ void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
chunk_kb ? "KB" : "B");
if (bitmap->storage.file) {
- seq_printf(seq, ", file: ");
+ seq_puts(seq, ", file: ");
seq_file_path(seq, bitmap->storage.file, " \t\n");
}
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
index d6dbca5edab8..5d2de3f32ae7 100644
--- a/drivers/md/md-faulty.c
+++ b/drivers/md/md-faulty.c
@@ -255,7 +255,7 @@ static void faulty_status(struct seq_file *seq, struct mddev *mddev)
n, conf->period[ReadFixable]);
if (atomic_read(&conf->counters[WriteAll]))
- seq_printf(seq, " WriteAll");
+ seq_puts(seq, " WriteAll");
seq_printf(seq, " nfaults=%d", conf->nfaults);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e50a1bcf0a1c..2e764ddc55d6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8036,16 +8036,16 @@ static void status_unused(struct seq_file *seq)
int i = 0;
struct md_rdev *rdev;
- seq_printf(seq, "unused devices: ");
+ seq_puts(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
i++;
seq_printf(seq, "%pg ", rdev->bdev);
}
if (!i)
- seq_printf(seq, "<none>");
+ seq_puts(seq, "<none>");
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
static int status_resync(struct seq_file *seq, struct mddev *mddev)
@@ -8091,23 +8091,23 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
!test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset != MaxSector &&
rdev->recovery_offset) {
- seq_printf(seq, "\trecover=REMOTE");
+ seq_puts(seq, "\trecover=REMOTE");
return 1;
}
if (mddev->reshape_position != MaxSector)
- seq_printf(seq, "\treshape=REMOTE");
+ seq_puts(seq, "\treshape=REMOTE");
else
- seq_printf(seq, "\tresync=REMOTE");
+ seq_puts(seq, "\tresync=REMOTE");
return 1;
}
if (mddev->recovery_cp < MaxSector) {
- seq_printf(seq, "\tresync=PENDING");
+ seq_puts(seq, "\tresync=PENDING");
return 1;
}
return 0;
}
if (resync < MD_RESYNC_ACTIVE) {
- seq_printf(seq, "\tresync=DELAYED");
+ seq_puts(seq, "\tresync=DELAYED");
return 1;
}
@@ -8129,13 +8129,13 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
int i, x = per_milli/50, y = 20-x;
- seq_printf(seq, "[");
+ seq_putc(seq, '[');
for (i = 0; i < x; i++)
- seq_printf(seq, "=");
- seq_printf(seq, ">");
+ seq_putc(seq, '=');
+ seq_putc(seq, '>');
for (i = 0; i < y; i++)
- seq_printf(seq, ".");
- seq_printf(seq, "] ");
+ seq_putc(seq, '.');
+ seq_puts(seq, "] ");
}
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
@@ -8280,7 +8280,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
@@ -8295,9 +8295,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro == MD_RDONLY)
- seq_printf(seq, " (read-only)");
+ seq_puts(seq, " (read-only)");
if (mddev->ro == MD_AUTO_READ)
- seq_printf(seq, " (auto-read-only)");
+ seq_puts(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
@@ -8307,17 +8307,17 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
- seq_printf(seq, "(W)");
+ seq_puts(seq, "(W)");
if (test_bit(Journal, &rdev->flags))
- seq_printf(seq, "(J)");
+ seq_puts(seq, "(J)");
if (test_bit(Faulty, &rdev->flags)) {
- seq_printf(seq, "(F)");
+ seq_puts(seq, "(F)");
continue;
}
if (rdev->raid_disk < 0)
- seq_printf(seq, "(S)"); /* spare */
+ seq_puts(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
- seq_printf(seq, "(R)");
+ seq_puts(seq, "(R)");
sectors += rdev->sectors;
}
rcu_read_unlock();
@@ -8342,21 +8342,21 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
- seq_printf(seq, " super non-persistent");
+ seq_puts(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
- seq_printf(seq, "\n ");
+ seq_puts(seq, "\n ");
if (mddev->pers->sync_request) {
if (status_resync(seq, mddev))
- seq_printf(seq, "\n ");
+ seq_puts(seq, "\n ");
}
} else
- seq_printf(seq, "\n ");
+ seq_puts(seq, "\n ");
md_bitmap_status(seq, mddev->bitmap);
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
spin_unlock(&mddev->lock);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index be86333104fe..42671d0147ea 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1646,7 +1646,7 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
- seq_printf(seq, "]");
+ seq_puts(seq, "]");
}
/**
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c8f909e8a25e..61eb64ecd373 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1963,7 +1963,7 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
- seq_printf(seq, "]");
+ seq_puts(seq, "]");
}
/* check if there are enough drives for
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f5167eb71b5f..55afe09202c0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -8159,7 +8159,7 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
rcu_read_unlock();
- seq_printf(seq, "]");
+ seq_putc(seq, ']');
}
static void print_raid5_conf(struct r5conf *conf)
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 15/34] md: avoid multiple line dereference [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (13 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 14/34] md: prefer seq_put[cs]() to seq_printf() [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 16/34] md: fix block comments [WARNING] heinzm
` (19 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 3 +--
drivers/md/md.c | 30 ++++++++++++------------------
2 files changed, 13 insertions(+), 20 deletions(-)
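For context, the checkpatch warning addressed here fires when a member dereference chain is split across lines. The fragment below is a minimal userspace sketch with invented names (struct disk/geom, disk_sectors()); it is not code from this series and only shows the before/after shape:

struct geom { unsigned long sectors; };
struct disk { struct geom geom; };

static unsigned long disk_sectors(const struct disk *d)
{
	/* before (the form checkpatch warns about):
	 *	return d->
	 *		geom.sectors;
	 */
	return d->geom.sectors;	/* after: keep the dereference on one line */
}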
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 2db748c998e1..fcf516d7fcff 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -469,8 +469,7 @@ void md_bitmap_update_sb(struct bitmap *bitmap)
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
- sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
- bitmap_info.space);
+ sb->sectors_reserved = cpu_to_le32(bitmap->mddev->bitmap_info.space);
kunmap_atomic(sb);
write_page(bitmap, bitmap->storage.sb_page, 1);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2e764ddc55d6..187fe8a25fc1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2833,8 +2833,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
* then added disks for geometry changes,
* and should be added immediately.
*/
- super_types[mddev->major_version].
- validate_super(mddev, rdev);
+ super_types[mddev->major_version].validate_super(mddev, rdev);
if (add_journal)
mddev_suspend(mddev);
err = mddev->pers->hot_add_disk(mddev, rdev);
@@ -3292,8 +3291,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
return -EINVAL;
if (mddev->pers && mddev->persistent &&
- !super_types[mddev->major_version]
- .allow_new_offset(rdev, new_offset))
+ !super_types[mddev->major_version].allow_new_offset(rdev, new_offset))
return -E2BIG;
rdev->new_data_offset = new_offset;
if (new_offset > rdev->data_offset)
@@ -3377,8 +3375,8 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return -EINVAL; /* too confusing */
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
- sectors = super_types[my_mddev->major_version].
- rdev_size_change(rdev, sectors);
+ sectors = super_types[my_mddev->major_version].rdev_size_change(rdev,
+ sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
@@ -3701,8 +3699,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
}
if (super_format >= 0) {
- err = super_types[super_format].
- load_super(rdev, NULL, super_minor);
+ err = super_types[super_format].load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
rdev->bdev,
@@ -3738,8 +3735,8 @@ static int analyze_sbs(struct mddev *mddev)
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
- switch (super_types[mddev->major_version].
- load_super(rdev, freshest, mddev->minor_version)) {
+ switch (super_types[mddev->major_version].load_super(rdev, freshest,
+ mddev->minor_version)) {
case 1:
freshest = rdev;
break;
@@ -3757,8 +3754,7 @@ static int analyze_sbs(struct mddev *mddev)
return -EINVAL;
}
- super_types[mddev->major_version].
- validate_super(mddev, freshest);
+ super_types[mddev->major_version].validate_super(mddev, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
@@ -3772,8 +3768,7 @@ static int analyze_sbs(struct mddev *mddev)
continue;
}
if (rdev != freshest) {
- if (super_types[mddev->major_version].
- validate_super(mddev, rdev)) {
+ if (super_types[mddev->major_version].validate_super(mddev, rdev)) {
pr_warn("md: kicking non-fresh %pg from array!\n",
rdev->bdev);
md_kick_rdev_from_array(rdev);
@@ -6793,8 +6788,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
rdev->raid_disk = -1;
rdev->saved_raid_disk = rdev->raid_disk;
} else
- super_types[mddev->major_version].
- validate_super(mddev, rdev);
+ super_types[mddev->major_version].validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
@@ -9831,8 +9825,8 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
if (err == 0) {
ClearPageUptodate(rdev->sb_page);
rdev->sb_loaded = 0;
- err = super_types[mddev->major_version].
- load_super(rdev, NULL, mddev->minor_version);
+ err = super_types[mddev->major_version].load_super(rdev, NULL,
+ mddev->minor_version);
}
if (err < 0) {
pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 16/34] md: fix block comments [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (14 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 15/34] md: avoid multiple line dereference [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 17/34] md: add missing function identifier names to function definition arguments [WARNING] heinzm
` (18 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 30 +++++-----
drivers/md/md-cluster.c | 17 ++++--
drivers/md/md-linear.c | 16 +++---
drivers/md/md.c | 121 ++++++++++++++++++++-------------------
drivers/md/raid0.c | 31 +++++-----
drivers/md/raid1.c | 18 +++---
drivers/md/raid10.c | 23 ++++----
drivers/md/raid5-cache.c | 24 ++++----
drivers/md/raid5-ppl.c | 11 ++--
drivers/md/raid5.c | 15 +++--
include/linux/raid/pq.h | 5 +-
11 files changed, 159 insertions(+), 152 deletions(-)
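For context, the style being fixed throughout this patch is the block-comment form where the closing */ shares the last line of text; checkpatch prefers the terminator on a line of its own. A tiny invented example, not taken from any of the hunks:

/* before (the form checkpatch warns about): the text and the
 * terminator share the final line like this */

/* after: same text, but the terminator gets its own line
 * and the comment is otherwise unchanged
 */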
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index fcf516d7fcff..9f1e25927d13 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -95,8 +95,7 @@ __acquires(bitmap->lock)
/* We don't support hijack for cluster raid */
if (no_hijack)
return -ENOMEM;
- /* failed - set the hijacked flag so that we can use the
- * pointer as a counter */
+ /* failed - set the hijacked flag so that we can use the pointer as a counter */
if (!bitmap->bp[page].map)
bitmap->bp[page].hijacked = 1;
} else if (bitmap->bp[page].map ||
@@ -815,8 +814,7 @@ static int md_bitmap_storage_alloc(struct bitmap_storage *store,
}
store->file_pages = pnum;
- /* We need 4 bits per page, rounded up to a multiple
- * of sizeof(unsigned long) */
+ /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
store->filemap_attr = kzalloc(
roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
GFP_KERNEL);
@@ -887,7 +885,8 @@ static void md_bitmap_file_kick(struct bitmap *bitmap)
enum bitmap_page_attr {
BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned.
- * i.e. counter is 1 or 2. */
+ * i.e. counter is 1 or 2.
+ */
BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
@@ -1003,7 +1002,8 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
- * sync the dirty pages of the bitmap file to disk */
+ * sync the dirty pages of the bitmap file to disk
+ */
void md_bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
@@ -1014,8 +1014,7 @@ void md_bitmap_unplug(struct bitmap *bitmap)
test_bit(BITMAP_STALE, &bitmap->flags))
return;
- /* look at each page to see if there are any set bits that need to be
- * flushed out to disk */
+ /* look at each page to see if there are any set bits that need to be flushed out to disk */
for (i = 0; i < bitmap->storage.file_pages; i++) {
dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
need_write = test_and_clear_page_attr(bitmap, i,
@@ -1269,8 +1268,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
if (bitmap->need_sync &&
mddev->bitmap_info.external == 0) {
- /* Arrange for superblock update as well as
- * other changes */
+ /* Arrange for superblock update as well as other changes */
bitmap_super_t *sb;
bitmap->need_sync = 0;
@@ -1383,8 +1381,7 @@ __acquires(bitmap->lock)
/* now locked ... */
if (bitmap->bp[page].hijacked) { /* hijacked pointer */
- /* should we use the first or second counter field
- * of the hijacked pointer? */
+ /* should we use the first or second counter field of the hijacked pointer? */
int hi = (pageoff > PAGE_COUNTER_MASK);
return &((bitmap_counter_t *)
@@ -1879,8 +1876,7 @@ struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
err = 0;
if (mddev->bitmap_info.chunksize == 0 ||
mddev->bitmap_info.daemon_sleep == 0)
- /* chunksize and time_base need to be
- * set first. */
+ /* chunksize and time_base need to be set first. */
err = -EINVAL;
}
if (err)
@@ -1936,8 +1932,7 @@ int md_bitmap_load(struct mddev *mddev)
if (mddev->degraded == 0
|| bitmap->events_cleared == mddev->events)
- /* no need to keep dirty bits to optimise a
- * re-add of a missing device */
+ /* no need to keep dirty bits to optimise a re-add of a missing device */
start = mddev->recovery_cp;
mutex_lock(&mddev->bitmap_info.mutex);
@@ -2018,7 +2013,8 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (clear_bits) {
md_bitmap_update_sb(bitmap);
/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
- * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
+ * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ...
+ */
for (i = 0; i < bitmap->storage.file_pages; i++)
if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index b61b1fba1c77..7ad5e1a97638 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -52,7 +52,8 @@ struct resync_info {
*/
#define MD_CLUSTER_SEND_LOCKED_ALREADY 5
/* We should receive message after node joined cluster and
- * set up all the related infos such as bitmap and personality */
+ * set up all the related infos such as bitmap and personality
+ */
#define MD_CLUSTER_ALREADY_IN_CLUSTER 6
#define MD_CLUSTER_PENDING_RECV_EVENT 7
#define MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD 8
@@ -331,7 +332,8 @@ static void recover_bitmaps(struct md_thread *thread)
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
/* wake up thread to continue resync in case resync
- * is not finished */
+ * is not finished
+ */
if (mddev->recovery_cp != MaxSector) {
/*
* clear the REMOTE flag since we will launch
@@ -383,7 +385,8 @@ static void recover_slot(void *arg, struct dlm_slot *slot)
slot->nodeid, slot->slot,
cinfo->slot_number);
/* deduct one since dlm slot starts from one while the num of
- * cluster-md begins with 0 */
+ * cluster-md begins with 0
+ */
__recover_slot(mddev, slot->slot - 1);
}
@@ -396,7 +399,8 @@ static void recover_done(void *arg, struct dlm_slot *slots,
cinfo->slot_number = our_slot;
/* completion is only need to be complete when node join cluster,
- * it doesn't need to run during another node's failure */
+ * it doesn't need to run during another node's failure
+ */
if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
complete(&cinfo->completion);
clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
@@ -405,7 +409,8 @@ static void recover_done(void *arg, struct dlm_slot *slots,
}
/* the ops is called when node join the cluster, and do lock recovery
- * if node failure occurs */
+ * if node failure occurs
+ */
static const struct dlm_lockspace_ops md_ls_ops = {
.recover_prep = recover_prep,
.recover_slot = recover_slot,
@@ -1443,7 +1448,7 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
*
* For other failure cases, metadata_update_cancel and
* add_new_disk_cancel also clear below bit as well.
- * */
+ */
set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
wake_up(&cinfo->wait);
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 35383589ec77..dc307ea05fe1 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- linear.c : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
-
- Linear mode management functions.
-
-*/
+ * linear.c : Multiple Devices driver for Linux
+ * Copyright (C) 1994-96 Marc ZYNGIER
+ * <zyngier@ufr-info-p7.ibp.fr> or
+ * <maz@gloups.fdn.fr>
+ *
+ * Linear mode management functions.
+ *
+ */
#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 187fe8a25fc1..e63543c98ba6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1,40 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- md.c : Multiple Devices driver for Linux
- Copyright (C) 1998, 1999, 2000 Ingo Molnar
-
- completely rewritten, based on the MD driver code from Marc Zyngier
-
- Changes:
-
- - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- - kmod support by: Cyrus Durgin
- - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
-
- - lots of fixes and improvements to the RAID1/RAID5 and generic
- RAID code (such as request based resynchronization):
-
- Neil Brown <neilb@cse.unsw.edu.au>.
-
- - persistent bitmap code
- Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
-
-
- Errors, Warnings, etc.
- Please use:
- pr_crit() for error conditions that risk data loss
- pr_err() for error conditions that are unexpected, like an IO error or internal inconsistency
- pr_warn() for error conditions that could have been predicated, like
- adding a device to an array when it has incompatible metadata
- pr_info() for every interesting, very rare events, like an array starting
- for stopping, or resync starting or stopping
- pr_debug() for everything else.
-
-*/
+ * md.c : Multiple Devices driver for Linux
+ * Copyright (C) 1998, 1999, 2000 Ingo Molnar
+ *
+ * completely rewritten, based on the MD driver code from Marc Zyngier
+ *
+ * Changes:
+ *
+ * - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
+ * - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
+ * - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
+ * - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
+ * - kmod support by: Cyrus Durgin
+ * - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
+ * - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
+ *
+ * - lots of fixes and improvements to the RAID1/RAID5 and generic
+ * RAID code (such as request based resynchronization):
+ *
+ * Neil Brown <neilb@cse.unsw.edu.au>.
+ *
+ * - persistent bitmap code
+ * Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
+ *
+ *
+ * Errors, Warnings, etc.
+ * Please use:
+ * pr_crit() for error conditions that risk data loss
+ * pr_err() for error conditions that are unexpected, like an IO error or internal inconsistency
+ * pr_warn() for error conditions that could have been predicated, like
+ * adding a device to an array when it has incompatible metadata
+ * pr_info() for every interesting, very rare events, like an array starting
+ * for stopping, or resync starting or stopping
+ * pr_debug() for everything else.
+ */
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
@@ -655,8 +654,7 @@ void mddev_put(struct mddev *mddev)
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
- /* Array is not configured at all, and not held active,
- * so destroy it */
+ /* Array is not configured at all, and not held active, so destroy it */
set_bit(MD_DELETED, &mddev->flags);
/*
@@ -1353,7 +1351,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling, except
- * for spares (which don't need an event count) */
+ * for spares (which don't need an event count)
+ */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
@@ -1378,15 +1377,16 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
- else if (desc->state & (1<<MD_DISK_SYNC) /* &&
- desc->raid_disk < mddev->raid_disks */) {
+ else if (desc->state & (1<<MD_DISK_SYNC)
+ /* && desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
rdev->saved_raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
- * that is, so set to zero for now */
+ * that is, so set to zero for now
+ */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
@@ -1886,7 +1886,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
} else if (mddev->pers == NULL) {
/* Insist of good event counter while assembling, except for
- * spares (which don't need an event count) */
+ * spares (which don't need an event count)
+ */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
@@ -2214,8 +2215,7 @@ super_1_allow_new_offset(struct md_rdev *rdev,
if (new_offset >= rdev->data_offset)
return 1;
- /* with 1.0 metadata, there is no metadata to tread on
- * so we can always move back */
+ /* with 1.0 metadata, there is no metadata to tread on so we can always move back */
if (rdev->mddev->minor_version == 0)
return 1;
@@ -2729,7 +2729,8 @@ void md_update_sb(struct mddev *mddev, int force_change)
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
- * and 'events' is odd, we can roll back to the previous clean state */
+ * and 'events' is odd, we can roll back to the previous clean state
+ */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
@@ -3237,8 +3238,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
- /* Must set offset before size, so overlap checks
- * can be sane */
+ /* Must set offset before size, so overlap checks can be sane */
return -EBUSY;
rdev->data_offset = offset;
rdev->new_data_offset = offset;
@@ -4324,9 +4324,9 @@ __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
* like active, but no writes have been seen for a while (100msec).
*
* broken
-* Array is failed. It's useful because mounted-arrays aren't stopped
-* when array is failed, so this state will at least alert the user that
-* something is wrong.
+ * Array is failed. It's useful because mounted-arrays aren't stopped
+ * when array is failed, so this state will at least alert the user that
+ * something is wrong.
*/
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, broken, bad_word};
@@ -6317,7 +6317,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
- * which will now never happen */
+ * which will now never happen
+ */
wake_up_process(mddev->sync_thread->tsk);
if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
@@ -6381,7 +6382,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (mddev->sync_thread)
/* Thread might be blocked waiting for metadata update
- * which will now never happen */
+ * which will now never happen
+ */
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
@@ -7649,7 +7651,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
- * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
+ * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed
+ */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
@@ -9126,9 +9129,9 @@ void md_do_sync(struct md_thread *thread)
}
}
skip:
- /* set CHANGE_PENDING here since maybe another update is needed,
- * so other nodes are informed. It should be harmless for normal
- * raid */
+ /* set CHANGE_PENDING here since maybe another update is needed,
+ * so other nodes are informed. It should be harmless for normal raid
+ */
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
@@ -9522,7 +9525,8 @@ void md_reap_sync_thread(struct mddev *mddev)
md_update_sb(mddev, 1);
/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
- * clustered raid */
+ * clustered raid
+ */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
md_cluster_ops->resync_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
@@ -9760,7 +9764,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
pr_info("Activated spare: %pg\n",
rdev2->bdev);
/* wakeup mddev->thread here, so array could
- * perform resync with the new activated disk */
+ * perform resync with the new activated disk
+ */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0fb47f4d78bf..0089f0657651 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- raid0.c : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
- Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
-
- RAID-0 management functions.
-
-*/
+ * raid0.c : Multiple Devices driver for Linux
+ * Copyright (C) 1994-96 Marc ZYNGIER
+ * <zyngier@ufr-info-p7.ibp.fr> or
+ * <maz@gloups.fdn.fr>
+ * Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
+ *
+ * RAID-0 management functions.
+ *
+ */
#include <linux/blkdev.h>
#include <linux/seq_file.h>
@@ -31,7 +31,7 @@ module_param(default_layout, int, 0644);
/*
* inform the user of the raid configuration
-*/
+ */
static void dump_zones(struct mddev *mddev)
{
int j, k;
@@ -304,7 +304,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
/*
* remaps the bio to the target device. we separate two flows.
* power 2 flow and a general flow for the sake of performance
-*/
+ */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
sector_t sector, sector_t *sector_offset)
{
@@ -328,11 +328,10 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
chunk = *sector_offset;
sector_div(chunk, chunk_sects * zone->nb_dev);
}
- /*
- * position the bio over the real device
- * real sector = chunk in device + starting of zone
- * + the position in the chunk
- */
+ /* position the bio over the real device
+ * real sector = chunk in device + starting of zone
+ * + the position in the chunk
+ */
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 42671d0147ea..5b7d1dea889d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -380,8 +380,7 @@ static void raid1_end_read_request(struct bio *bio)
set_bit(R1BIO_Uptodate, &r1_bio->state);
else if (test_bit(FailFast, &rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
- /* This was a fail-fast read so we definitely
- * want to retry */
+ /* This was a fail-fast read so we definitely want to retry */
;
else {
/* If all other devices have failed, we want to return
@@ -656,8 +655,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
rdev->recovery_offset < this_sector + sectors)
continue;
if (test_bit(WriteMostly, &rdev->flags)) {
- /* Don't balance among write-mostly, just
- * use the first as a last resort */
+ /* Don't balance among write-mostly, just use the first as a last resort */
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
@@ -1438,8 +1436,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
- /* mustn't write here until the bad block is
- * acknowledged*/
+ /* mustn't write here until the bad block is acknowledged */
set_bit(BlockedBadBlocks, &rdev->flags);
blocked_rdev = rdev;
break;
@@ -1533,7 +1530,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
- * is waiting for behind writes to flush */
+ * is waiting for behind writes to flush
+ */
if (bitmap &&
test_bit(WriteMostly, &rdev->flags) &&
(atomic_read(&bitmap->behind_writes)
@@ -2042,7 +2040,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[r1_bio->read_disk].rdev;
if (test_bit(FailFast, &rdev->flags)) {
/* Don't try recovering from here - just fail it
- * ... unless it is the last working device of course */
+ * ... unless it is the last working device of course
+ */
md_error(mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
/* Don't try to read from here, but make sure
@@ -2879,8 +2878,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (min_bad > 0 && min_bad < good_sectors) {
- /* only resync enough to reach the next bad->good
- * transition */
+ /* only resync enough to reach the next bad->good transition */
good_sectors = min_bad;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 61eb64ecd373..510000de0886 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -108,8 +108,7 @@ static void *r10bio_pool_alloc(gfp_t gfp_flags, void *data)
struct r10conf *conf = data;
int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
- /* allocate a r10bio with room for raid_disks entries in the
- * bios array */
+ /* allocate a r10bio with room for raid_disks entries in the bios array */
return kzalloc(size, gfp_flags);
}
@@ -905,8 +904,7 @@ static void flush_pending_writes(struct r10conf *conf)
__set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
- /* flush any pending bitmap writes to disk
- * before proceeding w/ I/O */
+ /* flush any pending bitmap writes to disk before proceeding w/ I/O */
md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);
@@ -2082,7 +2080,8 @@ static void print_conf(struct r10conf *conf)
conf->geo.raid_disks);
/* This is only called with ->reconfix_mutex held, so
- * rcu protection of rdev is not needed */
+ * rcu protection of rdev is not needed
+ */
for (i = 0; i < conf->geo.raid_disks; i++) {
rdev = conf->mirrors[i].rdev;
if (rdev)
@@ -2744,8 +2743,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
rdev = conf->mirrors[d].rdev;
if (test_bit(Faulty, &rdev->flags))
- /* drive has already been failed, just ignore any
- more fix_read_error() attempts */
+ /* drive has already been failed, just ignore any more fix_read_error() attempts */
return;
check_decay_read_errors(mddev, rdev);
@@ -3625,8 +3623,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
rcu_read_unlock();
if (j == conf->copies) {
- /* Cannot recover, so abort the recovery or
- * record a bad block */
+ /* Cannot recover, so abort the recovery or record a bad block */
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -4000,8 +3997,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
disks = mddev->raid_disks;
break;
default: /* avoid 'may be unused' warnings */
- case geo_start: /* new when starting reshape - raid_disks not
- * updated yet. */
+ case geo_start: /* new when starting reshape - raid_disks not updated yet. */
layout = mddev->new_layout;
chunk = mddev->new_chunk_sectors;
disks = mddev->raid_disks + mddev->delta_disks;
@@ -4024,7 +4020,8 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
geo->far_set_size = disks;
break;
case 1: /* "improved" layout which was buggy. Hopefully no-one is
- * actually using this, but leave code here just in case.*/
+ * actually using this, but leave code here just in case.
+ */
geo->far_set_size = disks/fc;
WARN(geo->far_set_size < fc,
"This RAID10 layout does not provide data safety - please backup and create new array\n");
@@ -4962,7 +4959,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
/*
* Broadcast RESYNC message to other nodes, so all nodes would not
* write to the region to avoid conflict.
- */
+ */
if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
struct mdp_superblock_1 *sb = NULL;
int sb_reshape_pos = 0;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 123cc38d4a02..f40ee2101796 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -84,13 +84,10 @@ struct r5l_log {
u32 uuid_checksum;
- sector_t device_size; /* log device size, round to
- * BLOCK_SECTORS */
- sector_t max_free_space; /* reclaim run if free space is at
- * this size */
+ sector_t device_size; /* log device size, round to BLOCK_SECTORS */
+ sector_t max_free_space; /* reclaim run if free space is at this size */
- sector_t last_checkpoint; /* log tail. where recovery scan
- * starts from */
+ sector_t last_checkpoint; /* log tail. where recovery scan starts from */
u64 last_cp_seq; /* log tail sequence */
sector_t log_start; /* log head. where new data appends */
@@ -104,12 +101,13 @@ struct r5l_log {
spinlock_t io_list_lock;
struct list_head running_ios; /* io_units which are still running,
* and have not yet been completely
- * written to the log */
+ * written to the log
+ */
struct list_head io_end_ios; /* io_units which have been completely
* written to the log but not yet written
- * to the RAID */
- struct list_head flushing_ios; /* io_units which are waiting for log
- * cache flush */
+ * to the RAID
+ */
+ struct list_head flushing_ios; /* io_units which are waiting for log cache flush */
struct list_head finished_ios; /* io_units which settle down in log disk */
struct bio flush_bio;
@@ -127,7 +125,8 @@ struct r5l_log {
* IO_UNIT_STRIPE_END state (eg, reclaim
* doesn't wait for specific io_unit
* switching to IO_UNIT_STRIPE_END
- * state) */
+ * state)
+ */
wait_queue_head_t iounit_wait;
struct list_head no_space_stripes; /* pending stripes, log has no space */
@@ -242,7 +241,8 @@ struct r5l_io_unit {
enum r5l_io_unit_state {
IO_UNIT_RUNNING = 0, /* accepting new IO */
IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
- * don't accepting new bio */
+ * don't accepting new bio
+ */
IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
};
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 3ba595ec6ad8..cfff345951db 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -92,7 +92,8 @@ struct ppl_conf {
int count;
int block_size; /* the logical block size used for data_sector
- * in ppl_header_entry */
+ * in ppl_header_entry
+ */
u32 signature; /* raid array identifier */
atomic64_t seq; /* current log write sequence number */
@@ -115,11 +116,13 @@ struct ppl_conf {
struct ppl_log {
struct ppl_conf *ppl_conf; /* shared between all log instances */
- struct md_rdev *rdev; /* array member disk associated with
- * this log instance */
+ struct md_rdev *rdev; /* array member disk associated
+ * with this log instance
+ */
struct mutex io_mutex;
struct ppl_io_unit *current_io; /* current io_unit accepting new data
- * always at the end of io_list */
+ * always at the end of io_list
+ */
spinlock_t io_list_lock;
struct list_head io_list; /* all io_units of this log */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 55afe09202c0..08a0ee77cacb 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1233,7 +1233,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
- * a chance*/
+ * a chance
+ */
md_check_recovery(conf->mddev);
}
/*
@@ -4837,7 +4838,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
- * only to .rdev, so make sure to check that*/
+ * only to .rdev, so make sure to check that
+ */
struct md_rdev *rdev2 = rcu_dereference(
conf->disks[i].rdev);
if (rdev2 == rdev)
@@ -4850,7 +4852,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
- * only to .rdev, so make sure to check that*/
+ * only to .rdev, so make sure to check that
+ */
struct md_rdev *rdev2 = rcu_dereference(
conf->disks[i].rdev);
if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
@@ -5035,7 +5038,8 @@ static void handle_stripe(struct stripe_head *sh)
if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
/* already being handled, ensure it gets handled
- * again when current action finishes */
+ * again when current action finishes
+ */
set_bit(STRIPE_HANDLE, &sh->state);
return;
}
@@ -7874,8 +7878,7 @@ static int raid5_run(struct mddev *mddev)
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
- /* here_old is the first stripe that we might need to read
- * from */
+ /* here_old is the first stripe that we might need to read from */
if (mddev->delta_disks == 0) {
/* We cannot be sure it is safe to start an in-place
* reshape. It is only safe if user-space is monitoring
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 9e7088e03852..7fa2bef58ff3 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* -*- linux-c -*- ------------------------------------------------------- *
+/* -*- linux-c -*- -------------------------------------------------------
*
* Copyright 2003 H. Peter Anvin - All Rights Reserved
*
- * ----------------------------------------------------------------------- */
+ * -----------------------------------------------------------------------
+ */
#ifndef LINUX_RAID_RAID6_H
#define LINUX_RAID_RAID6_H
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 17/34] md: add missing function identifier names to function definition arguments [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (15 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 16/34] md: fix block comments [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 18/34] md: avoid redundant braces in single line statements [WARNING] heinzm
` (17 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 4 ++--
include/linux/raid/pq.h | 8 ++++----
include/linux/raid/xor.h | 28 ++++++++++++++--------------
3 files changed, 20 insertions(+), 20 deletions(-)
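For context, this warning asks that function pointer declarations carry parameter names rather than bare types, as the rdev_sysfs_entry, raid6_calls and xor_block_template hunks below do. A minimal sketch with an invented struct and member:

struct io_ops_example {
	/* before (the form checkpatch warns about):
	 *	int (*read)(void *, char *, unsigned long);
	 */
	int (*read)(void *ctx, char *buf, unsigned long len);	/* after */
};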
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e63543c98ba6..dbdd0288ddd2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2878,8 +2878,8 @@ static int cmd_match(const char *cmd, const char *str)
struct rdev_sysfs_entry {
struct attribute attr;
- ssize_t (*show)(struct md_rdev *, char *);
- ssize_t (*store)(struct md_rdev *, const char *, size_t);
+ ssize_t (*show)(struct md_rdev *rdev, char *buf);
+ ssize_t (*store)(struct md_rdev *rdev, const char *buf, size_t sz);
};
static ssize_t
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 7fa2bef58ff3..41c525e4c959 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -70,8 +70,8 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
/* Routine choices */
struct raid6_calls {
- void (*gen_syndrome)(int, size_t, void **);
- void (*xor_syndrome)(int, int, int, size_t, void **);
+ void (*gen_syndrome)(int disks, size_t bytes, void **ptrs);
+ void (*xor_syndrome)(int disks, int start, int stop, size_t bytes, void **ptrs);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
int priority; /* Relative priority ranking if non-zero */
@@ -111,8 +111,8 @@ extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
struct raid6_recov_calls {
- void (*data2)(int, size_t, int, int, void **);
- void (*datap)(int, size_t, int, void **);
+ void (*data2)(int disks, size_t bytes, int faila, int failb, void **ptrs);
+ void (*datap)(int disks, size_t bytes, int faila, void **ptrs);
int (*valid)(void);
const char *name;
int priority;
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 231f467935a9..1630b0681099 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,20 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *__restrict,
- const unsigned long *__restrict);
- void (*do_3)(unsigned long, unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict);
- void (*do_4)(unsigned long, unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict);
- void (*do_5)(unsigned long, unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict,
- const unsigned long *__restrict);
+ void (*do_2)(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2);
+ void (*do_3)(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3);
+ void (*do_4)(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4);
+ void (*do_5)(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5);
};
#endif
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 18/34] md: avoid redundant braces in single line statements [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (16 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 17/34] md: add missing function identifier names to function definition arguments [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 19/34] md: place constant on the right side of a test [WARNING] heinzm
` (16 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 3 +--
drivers/md/md-cluster.c | 3 +--
drivers/md/raid5.c | 3 +--
3 files changed, 3 insertions(+), 6 deletions(-)
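For context, the warning concerns braces around a body that is a single statement. A minimal userspace sketch, with free_all() invented for the example:

#include <stdlib.h>

static void free_all(void **items, int n)
{
	int i;

	/* before (the form checkpatch warns about):
	 *	for (i = 0; i < n; i++) {
	 *		free(items[i]);
	 *	}
	 */
	for (i = 0; i < n; i++)
		free(items[i]);
}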
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 9f1e25927d13..65e77a7e3656 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2173,9 +2173,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
unsigned long k;
/* deallocate the page memory */
- for (k = 0; k < page; k++) {
+ for (k = 0; k < page; k++)
kfree(new_bp[k].map);
- }
kfree(new_bp);
/* restore some fields from old_counts */
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 7ad5e1a97638..762160e81ce8 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1531,9 +1531,8 @@ static void unlock_all_bitmaps(struct mddev *mddev)
/* release other node's bitmap lock if they are existed */
if (cinfo->other_bitmap_lockres) {
for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
- if (cinfo->other_bitmap_lockres[i]) {
+ if (cinfo->other_bitmap_lockres[i])
lockres_free(cinfo->other_bitmap_lockres[i]);
- }
}
kfree(cinfo->other_bitmap_lockres);
cinfo->other_bitmap_lockres = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 08a0ee77cacb..f834c497b8fe 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4109,9 +4109,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
pr_debug("Return write for disc %d\n", i);
if (test_and_clear_bit(R5_Discard, &dev->flags))
clear_bit(R5_UPTODATE, &dev->flags);
- if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
+ if (test_and_clear_bit(R5_SkipCopy, &dev->flags))
WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
- }
do_endio = true;
returnbi:
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 19/34] md: place constant on the right side of a test [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (17 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 18/34] md: avoid redundant braces in single line statements [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 20/34] md: avoid pointless filenames in files [WARNING] heinzm
` (15 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/raid10.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
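For context, checkpatch prefers the variable on the left and the constant on the right of a comparison, which is exactly the rewrite of the resync window clamp below. A standalone sketch with an invented constant and helper:

#define MIN_WINDOW_SECTORS 64UL

static unsigned long clamp_window(unsigned long window)
{
	/* before (the form checkpatch warns about):
	 *	return (MIN_WINDOW_SECTORS > window) ? MIN_WINDOW_SECTORS : window;
	 */
	return (window < MIN_WINDOW_SECTORS) ? MIN_WINDOW_SECTORS : window;
}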
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 510000de0886..60c9fba59d9f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3259,8 +3259,8 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
/*
* At least use a 32M window to align with raid1's resync window
*/
- window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
- CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
+ window_size = (window_size < CLUSTER_RESYNC_WINDOW_SECTORS) ?
+ CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
conf->cluster_sync_high = conf->cluster_sync_low + window_size;
}
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 20/34] md: avoid pointless filenames in files [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (18 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 19/34] md: place constant on the right side of a test [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 21/34] md: avoid useless else after break or return [WARNING] heinzm
` (14 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 2 +-
drivers/md/raid0.c | 3 ++-
drivers/md/raid1.c | 2 +-
drivers/md/raid10.c | 2 +-
drivers/md/raid5.c | 2 +-
5 files changed, 6 insertions(+), 5 deletions(-)
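For context, the warning here is that a file's opening comment should not repeat the file's own name; the hunks below simply drop the "md.c :", "raid0.c :" and similar prefixes. An invented example of the shape of the change:

/* before (the form checkpatch warns about):
 *	frob.c : Frobnication driver for Linux
 *
 * after:
 *	Frobnication driver for Linux
 */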
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dbdd0288ddd2..e19edfe62516 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * md.c : Multiple Devices driver for Linux
+ * Multiple Devices driver for Linux
* Copyright (C) 1998, 1999, 2000 Ingo Molnar
*
* completely rewritten, based on the MD driver code from Marc Zyngier
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0089f0657651..b4c372c6861b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * raid0.c : Multiple Devices driver for Linux
+ * Multiple Devices driver for Linux
+ *
* Copyright (C) 1994-96 Marc ZYNGIER
* <zyngier@ufr-info-p7.ibp.fr> or
* <maz@gloups.fdn.fr>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5b7d1dea889d..bd245f41393a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * raid1.c : Multiple Devices driver for Linux
+ * Multiple Devices driver for Linux
*
* Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 60c9fba59d9f..dbad26fcca12 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * raid10.c : Multiple Devices driver for Linux
+ * Multiple Devices driver for Linux
*
* Copyright (C) 2000-2004 Neil Brown
*
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f834c497b8fe..a7b37a4e3f66 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * raid5.c : Multiple Devices driver for Linux
+ * Multiple Devices driver for Linux
* Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
* Copyright (C) 1999, 2000 Ingo Molnar
* Copyright (C) 2002, 2003 H. Peter Anvin
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 21/34] md: avoid useless else after break or return [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (19 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 20/34] md: avoid pointless filenames in files [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 22/34] md: don't indent labels [WARNING] heinzm
` (13 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 5 +++--
drivers/md/md-cluster.c | 5 +++--
drivers/md/md.c | 31 ++++++++++++++++---------------
drivers/md/raid10.c | 18 +++++++++---------
4 files changed, 31 insertions(+), 28 deletions(-)
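For context, the warning fires when an else follows a branch that already ends in break or return, so the else only adds indentation. A minimal sketch, with parse_flag() invented for the example:

static int parse_flag(const char *arg)
{
	if (!arg)
		return -1;
	/* before (the form checkpatch warns about):
	 *	if (!arg)
	 *		return -1;
	 *	else
	 *		return arg[0] == 'y';
	 */
	return arg[0] == 'y';
}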
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 65e77a7e3656..e739efe2249d 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2188,8 +2188,9 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
blocks = old_counts.chunks << old_counts.chunkshift;
pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
break;
- } else
- bitmap->counts.bp[page].count += 1;
+ }
+
+ bitmap->counts.bp[page].count += 1;
}
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 762160e81ce8..e115603ff0d9 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -174,8 +174,9 @@ static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode,
pr_info("failed to cancel previous lock request "
"%s return %d\n", res->name, ret);
return -EPERM;
- } else
- res->sync_locking_done = false;
+ }
+
+ res->sync_locking_done = false;
if (res->lksb.sb_status == 0)
res->mode = mode;
return res->lksb.sb_status;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e19edfe62516..37f1323306aa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2180,7 +2180,6 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb_start &= ~(sector_t)(4*2 - 1);
bm_space = super_1_choose_bm_space(dev_size);
-
/* Space that can be used to store date needs to decrease
* superblock bitmap space and bad block space(4K)
*/
@@ -3200,8 +3199,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err) {
rdev->raid_disk = -1;
return err;
- } else
- sysfs_notify_dirent_safe(rdev->sysfs_state);
+ }
+
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
/* failure here is OK */;
sysfs_link_rdev(rdev->mddev, rdev);
/* don't wakeup anyone, leave that to userspace. */
@@ -7359,13 +7359,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
- else {
- mddev->new_layout = info->layout;
- rv = mddev->pers->check_reshape(mddev);
- if (rv)
- mddev->new_layout = mddev->layout;
- return rv;
- }
+
+ mddev->new_layout = info->layout;
+ rv = mddev->pers->check_reshape(mddev);
+ if (rv)
+ mddev->new_layout = mddev->layout;
+ return rv;
}
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -7689,16 +7688,18 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (mddev->pers) {
mdu_disk_info_t info;
- if (copy_from_user(&info, argp, sizeof(info)))
+ if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
- else if (!(info.state & (1<<MD_DISK_SYNC)))
+ goto unlock;
+ }
+
+ if (!(info.state & (1<<MD_DISK_SYNC)))
/* Need to clear read-only for this */
break;
- else
- err = md_add_new_disk(mddev, &info);
+
+ err = md_add_new_disk(mddev, &info);
goto unlock;
}
- break;
}
/*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dbad26fcca12..7ab011cdb995 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2325,6 +2325,7 @@ static void end_reshape_read(struct bio *bio)
static void end_sync_request(struct r10bio *r10_bio)
{
struct mddev *mddev = r10_bio->mddev;
+ struct r10bio *r10_bio2;
while (atomic_dec_and_test(&r10_bio->remaining)) {
if (r10_bio->master_bio == NULL) {
@@ -2338,16 +2339,15 @@ static void end_sync_request(struct r10bio *r10_bio)
put_buf(r10_bio);
md_done_sync(mddev, s, 1);
break;
- } else {
- struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
-
- if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
- test_bit(R10BIO_WriteError, &r10_bio->state))
- reschedule_retry(r10_bio);
- else
- put_buf(r10_bio);
- r10_bio = r10_bio2;
}
+
+ r10_bio2 = (struct r10bio *)r10_bio->master_bio;
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ put_buf(r10_bio);
+ r10_bio = r10_bio2;
}
}
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 22/34] md: don't indent labels [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (20 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 21/34] md: avoid useless else after break or return [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 23/34] md: fix code indent for conditional statements [WARNING] heinzm
` (12 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 8 ++++----
drivers/md/raid10.c | 2 +-
drivers/md/raid5.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
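For context, checkpatch wants goto labels flush with column 0 rather than indented along with the surrounding code, which is all the hunks below change. A small userspace sketch with an invented helper:

#include <stdio.h>

static FILE *retry_open(const char *path, int attempts)
{
	FILE *fp;

again:	/* the label itself is not indented; only the statements are */
	fp = fopen(path, "r");
	if (!fp && --attempts > 0)
		goto again;
	return fp;
}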
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 37f1323306aa..b68b6d9dd8b6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8818,7 +8818,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = MD_RESYNC_DELAYED;
- try_again:
+try_again:
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
spin_lock(&all_mddevs_lock);
@@ -9033,7 +9033,7 @@ void md_do_sync(struct md_thread *thread)
continue;
last_check = io_sectors;
- repeat:
+repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
@@ -9476,7 +9476,7 @@ void md_check_recovery(struct mddev *mddev)
queue_work(md_misc_wq, &mddev->del_work);
goto unlock;
}
- not_running:
+not_running:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
wake_up(&resync_wait);
@@ -9485,7 +9485,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
- unlock:
+unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7ab011cdb995..a95609d5e79c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -5194,7 +5194,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
rcu_read_lock();
if (success)
break;
- failed:
+failed:
slot++;
if (slot >= conf->copies)
slot = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a7b37a4e3f66..a1da82a72553 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5864,7 +5864,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
logical_sector += RAID5_STRIPE_SECTORS(conf)) {
DEFINE_WAIT(w);
int d;
- again:
+again:
sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
--
2.39.2
^ permalink raw reply related [flat|nested] 42+ messages in thread
* [PATCH 23/34] md: fix code indent for conditional statements [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (21 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 22/34] md: don't indent labels [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 24/34] md: prefer octal permissions [WARNING] heinzm
` (11 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 2 +-
drivers/md/md.c | 8 ++++----
drivers/md/raid5.c | 4 ++--
3 files changed, 7 insertions(+), 7 deletions(-)
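For context, this warning flags statements whose indentation does not match the conditional they belong to; the hunks below also move the empty statement ';' in front of its comment so the intent stays obvious. A minimal sketch with invented names:

static const char *state_name(int state)
{
	if (state == 0)
		; /* nothing to report while idle */
	else if (state == 1)
		return "running";

	return "unknown";
}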
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index e739efe2249d..b78b3647c4e7 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2327,7 +2327,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
long long offset;
if (strncmp(buf, "none", 4) == 0)
- /* nothing to be done */;
+ ; /* nothing to be done */
else if (strncmp(buf, "file:", 5) == 0) {
/* Not supported yet */
rv = -EINVAL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b68b6d9dd8b6..858cbb5252df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2053,7 +2053,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
if (rdev->badblocks.count == 0)
- /* Nothing to do for bad blocks*/;
+ ; /* Nothing to do for bad blocks*/
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
@@ -2676,7 +2676,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
!test_bit(Journal, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
+ rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
@@ -5854,7 +5854,7 @@ int md_run(struct mddev *mddev)
* Internal Bitmap issues have been handled elsewhere.
*/
if (rdev->meta_bdev) {
- /* Nothing to check */;
+ ; /* Nothing to check */
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors > rdev->sb_start) {
@@ -6151,7 +6151,7 @@ static int restart_array(struct mddev *mddev)
rcu_read_unlock();
if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
/* Don't restart rw with journal missing/faulty */
- return -EINVAL;
+ return -EINVAL;
if (has_readonly)
return -EROFS;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a1da82a72553..f418035da889 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4811,7 +4811,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
clear_bit(R5_Insync, &dev->flags);
if (!rdev)
- /* Not in-sync */;
+ ; /* Not in-sync */
else if (is_bad) {
/* also not in-sync */
if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -7888,7 +7888,7 @@ static int raid5_run(struct mddev *mddev)
*/
if (abs(min_offset_diff) >= mddev->chunk_sectors &&
abs(min_offset_diff) >= mddev->new_chunk_sectors)
- /* not really in-place - so OK */;
+ ; /* not really in-place - so OK */
else if (mddev->ro == 0) {
pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
mdname(mddev));
--
2.39.2
* [PATCH 24/34] md: prefer octal permissions [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (22 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 23/34] md: fix code indent for conditional statements [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 25/34] md: remove bogus IS_ENABLED() macro [WARNING] heinzm
` (10 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
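Use octal permission literals instead of symbolic S_I* mode macros, as
checkpatch.pl prefers. The translations applied throughout this patch are:
	S_IRUGO			-> 0444
	S_IRUGO | S_IWUSR	-> 0644
	S_IWUSR			-> 0200
	S_IRUSR | S_IWUSR	-> 0600
	S_IRUGO | S_IXUGO	-> 0555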
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 28 +++----
drivers/md/md.c | 176 ++++++++++++++++++++---------------------
drivers/md/raid5-ppl.c | 4 +-
drivers/md/raid5.c | 29 +++----
4 files changed, 106 insertions(+), 131 deletions(-)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b78b3647c4e7..25895ec7d89a 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2387,8 +2387,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_location =
-__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
+static struct md_sysfs_entry bitmap_location = __ATTR(location, 0644, location_show, location_store);
/* 'bitmap/space' is the space available at 'location' for the
* bitmap. This allows the kernel to know when it is safe to
@@ -2424,8 +2423,7 @@ space_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_space =
-__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
+static struct md_sysfs_entry bitmap_space = __ATTR(space, 0644, space_show, space_store);
static ssize_t
timeout_show(struct mddev *mddev, char *page)
@@ -2475,8 +2473,7 @@ timeout_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_timeout =
-__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
+static struct md_sysfs_entry bitmap_timeout = __ATTR(time_base, 0644, timeout_show, timeout_store);
static ssize_t
backlog_show(struct mddev *mddev, char *page)
@@ -2531,8 +2528,7 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_backlog =
-__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
+static struct md_sysfs_entry bitmap_backlog = __ATTR(backlog, 0644, backlog_show, backlog_store);
static ssize_t
chunksize_show(struct mddev *mddev, char *page)
@@ -2562,8 +2558,8 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_chunksize =
-__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
+static struct md_sysfs_entry bitmap_chunksize = __ATTR(chunksize, 0644,
+ chunksize_show, chunksize_store);
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
@@ -2589,8 +2585,7 @@ static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_metadata =
-__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+static struct md_sysfs_entry bitmap_metadata = __ATTR(metadata, 0644, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
@@ -2621,8 +2616,8 @@ static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry bitmap_can_clear =
-__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+static struct md_sysfs_entry bitmap_can_clear = __ATTR(can_clear, 0644,
+ can_clear_show, can_clear_store);
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
@@ -2647,8 +2642,9 @@ behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry max_backlog_used =
-__ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset);
+static struct md_sysfs_entry max_backlog_used = __ATTR(max_backlog_used, 0644,
+ behind_writes_used_show,
+ behind_writes_used_reset);
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 858cbb5252df..a495fad762ae 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -307,14 +307,14 @@ static struct ctl_table raid_table[] = {
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
@@ -324,7 +324,7 @@ static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
- .mode = S_IRUGO|S_IXUGO,
+ .mode = 0555,
.child = raid_table,
},
{ }
@@ -3100,8 +3100,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
-static struct rdev_sysfs_entry rdev_state =
-__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
+
+static struct rdev_sysfs_entry rdev_state = __ATTR_PREALLOC(state, 0644, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
@@ -3121,8 +3121,8 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
atomic_set(&rdev->corrected_errors, n);
return len;
}
-static struct rdev_sysfs_entry rdev_errors =
-__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
+
+static struct rdev_sysfs_entry rdev_errors = __ATTR(errors, 0644, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
@@ -3219,8 +3219,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
-static struct rdev_sysfs_entry rdev_slot =
-__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
+static struct rdev_sysfs_entry rdev_slot = __ATTR(slot, 0644, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
@@ -3245,8 +3244,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
-static struct rdev_sysfs_entry rdev_offset =
-__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
+static struct rdev_sysfs_entry rdev_offset = __ATTR(offset, 0644, offset_show, offset_store);
static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
@@ -3301,8 +3299,9 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
return len;
}
-static struct rdev_sysfs_entry rdev_new_offset =
-__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
+
+static struct rdev_sysfs_entry rdev_new_offset = __ATTR(new_offset, 0644,
+ new_offset_show, new_offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
@@ -3409,8 +3408,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
-static struct rdev_sysfs_entry rdev_size =
-__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
+static struct rdev_sysfs_entry rdev_size = __ATTR(size, 0644, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
@@ -3444,8 +3442,9 @@ static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_
return len;
}
-static struct rdev_sysfs_entry rdev_recovery_start =
-__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+static struct rdev_sysfs_entry rdev_recovery_start = __ATTR(recovery_start, 0644,
+ recovery_start_show,
+ recovery_start_store);
/* sysfs access to bad-blocks list.
* We present two files.
@@ -3470,8 +3469,7 @@ static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
wake_up(&rdev->blocked_wait);
return rv;
}
-static struct rdev_sysfs_entry rdev_bad_blocks =
-__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
+static struct rdev_sysfs_entry rdev_bad_blocks = __ATTR(bad_blocks, 0644, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
@@ -3481,8 +3479,8 @@ static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
return badblocks_store(&rdev->badblocks, page, len, 1);
}
-static struct rdev_sysfs_entry rdev_unack_bad_blocks =
-__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
+static struct rdev_sysfs_entry rdev_unack_bad_blocks = __ATTR(unacknowledged_bad_blocks, 0644,
+ ubb_show, ubb_store);
static ssize_t
ppl_sector_show(struct md_rdev *rdev, char *page)
@@ -3520,8 +3518,8 @@ ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
-static struct rdev_sysfs_entry rdev_ppl_sector =
-__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
+static struct rdev_sysfs_entry rdev_ppl_sector = __ATTR(ppl_sector, 0644,
+ ppl_sector_show, ppl_sector_store);
static ssize_t
ppl_size_show(struct md_rdev *rdev, char *page)
@@ -3553,8 +3551,7 @@ ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
-static struct rdev_sysfs_entry rdev_ppl_size =
-__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
+static struct rdev_sysfs_entry rdev_ppl_size = __ATTR(ppl_size, 0644, ppl_size_show, ppl_size_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
@@ -3861,8 +3858,9 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
}
return len;
}
-static struct md_sysfs_entry md_safe_delay =
-__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
+
+static struct md_sysfs_entry md_safe_delay = __ATTR(safe_mode_delay, 0644,
+ safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
@@ -4088,8 +4086,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
return rv;
}
-static struct md_sysfs_entry md_level =
-__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
+static struct md_sysfs_entry md_level = __ATTR(level, 0644, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
@@ -4134,8 +4131,7 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
mddev_unlock(mddev);
return err ?: len;
}
-static struct md_sysfs_entry md_layout =
-__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
+static struct md_sysfs_entry md_layout = __ATTR(layout, 0644, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
@@ -4189,16 +4185,17 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
mddev_unlock(mddev);
return err ? err : len;
}
-static struct md_sysfs_entry md_raid_disks =
-__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
+
+static struct md_sysfs_entry md_raid_disks = __ATTR(raid_disks, 0644,
+ raid_disks_show, raid_disks_store);
static ssize_t
uuid_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%pU\n", mddev->uuid);
}
-static struct md_sysfs_entry md_uuid =
-__ATTR(uuid, S_IRUGO, uuid_show, NULL);
+
+static struct md_sysfs_entry md_uuid = __ATTR(uuid, 0444, uuid_show, NULL);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
@@ -4243,8 +4240,8 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
mddev_unlock(mddev);
return err ?: len;
}
-static struct md_sysfs_entry md_chunk_size =
-__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
+static struct md_sysfs_entry md_chunk_size = __ATTR(chunk_size, 0644,
+ chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
@@ -4285,7 +4282,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_resync_start =
-__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
+__ATTR_PREALLOC(resync_start, 0644,
resync_start_show, resync_start_store);
/*
@@ -4500,7 +4497,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
-__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
+__ATTR_PREALLOC(array_state, 0644, array_state_show, array_state_store);
static ssize_t max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
@@ -4520,9 +4517,9 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len
return len;
}
-static struct md_sysfs_entry max_corr_read_errors =
-__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
- max_corrected_read_errors_store);
+static struct md_sysfs_entry max_corr_read_errors = __ATTR(max_read_errors, 0644,
+ max_corrected_read_errors_show,
+ max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
@@ -4606,8 +4603,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
return err ? err : len;
}
-static struct md_sysfs_entry md_new_device =
-__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
+static struct md_sysfs_entry md_new_device = __ATTR(new_dev, 0200, null_show, new_dev_store);
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
@@ -4643,8 +4639,7 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry md_bitmap =
-__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
+static struct md_sysfs_entry md_bitmap = __ATTR(bitmap_set_bits, 0200, null_show, bitmap_store);
static ssize_t
size_show(struct mddev *mddev, char *page)
@@ -4685,8 +4680,7 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
return err ? err : len;
}
-static struct md_sysfs_entry md_size =
-__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
+static struct md_sysfs_entry md_size = __ATTR(component_size, 0644, size_show, size_store);
/* Metadata version.
* This is one of
@@ -4770,8 +4764,8 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_metadata =
-__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+static struct md_sysfs_entry md_metadata = __ATTR_PREALLOC(metadata_version, 0644,
+ metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
@@ -4882,8 +4876,8 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return len;
}
-static struct md_sysfs_entry md_scan_mode =
-__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
+static struct md_sysfs_entry md_scan_mode = __ATTR_PREALLOC(sync_action, 0644,
+ action_show, action_store);
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
@@ -4929,8 +4923,8 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry md_sync_min =
-__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
+static struct md_sysfs_entry md_sync_min = __ATTR(sync_speed_min, 0644,
+ sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
@@ -4958,8 +4952,8 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry md_sync_max =
-__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
+static struct md_sysfs_entry md_sync_max = __ATTR(sync_speed_max, 0644,
+ sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
@@ -4994,9 +4988,9 @@ sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
}
/* force parallel resync, even with shared block devices */
-static struct md_sysfs_entry md_sync_force_parallel =
-__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
- sync_force_parallel_show, sync_force_parallel_store);
+static struct md_sysfs_entry md_sync_force_parallel = __ATTR(sync_force_parallel, 0644,
+ sync_force_parallel_show,
+ sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
@@ -5037,8 +5031,8 @@ sync_completed_show(struct mddev *mddev, char *page)
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
-static struct md_sysfs_entry md_sync_completed =
- __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
+static struct md_sysfs_entry md_sync_completed = __ATTR_PREALLOC(sync_completed, 0444,
+ sync_completed_show, NULL);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
@@ -5073,8 +5067,7 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_min_sync =
-__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
+static struct md_sysfs_entry md_min_sync = __ATTR(sync_min, 0644, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
@@ -5126,8 +5119,7 @@ max_sync_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_max_sync =
-__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
+static struct md_sysfs_entry md_max_sync = __ATTR(sync_max, 0644, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
@@ -5163,8 +5155,9 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
mddev_unlock(mddev);
return err ?: len;
}
-static struct md_sysfs_entry md_suspend_lo =
-__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
+
+static struct md_sysfs_entry md_suspend_lo = __ATTR(suspend_lo, 0644,
+ suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
@@ -5200,8 +5193,9 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
mddev_unlock(mddev);
return err ?: len;
}
-static struct md_sysfs_entry md_suspend_hi =
-__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
+
+static struct md_sysfs_entry md_suspend_hi = __ATTR(suspend_hi, 0644,
+ suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
@@ -5245,9 +5239,9 @@ reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_reshape_position =
-__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
- reshape_position_store);
+static struct md_sysfs_entry md_reshape_position = __ATTR(reshape_position, 0644,
+ reshape_position_show,
+ reshape_position_store);
static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
@@ -5286,9 +5280,9 @@ reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_reshape_direction =
-__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
- reshape_direction_store);
+static struct md_sysfs_entry md_reshape_direction = __ATTR(reshape_direction, 0644,
+ reshape_direction_show,
+ reshape_direction_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
@@ -5342,9 +5336,8 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_array_size =
-__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
- array_size_store);
+static struct md_sysfs_entry md_array_size = __ATTR(array_size, 0644,
+ array_size_show, array_size_store);
static ssize_t
consistency_policy_show(struct mddev *mddev, char *page)
@@ -5388,9 +5381,9 @@ consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
return err ? err : len;
}
-static struct md_sysfs_entry md_consistency_policy =
-__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
- consistency_policy_store);
+static struct md_sysfs_entry md_consistency_policy = __ATTR(consistency_policy, 0644,
+ consistency_policy_show,
+ consistency_policy_store);
static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
{
@@ -5416,9 +5409,8 @@ fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
return len;
}
-static struct md_sysfs_entry md_fail_last_dev =
-__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
- fail_last_dev_store);
+static struct md_sysfs_entry md_fail_last_dev = __ATTR(fail_last_dev, 0644,
+ fail_last_dev_show, fail_last_dev_store);
static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
{
@@ -5466,9 +5458,9 @@ serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
return err ?: len;
}
-static struct md_sysfs_entry md_serialize_policy =
-__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
- serialize_policy_store);
+static struct md_sysfs_entry md_serialize_policy = __ATTR(serialize_policy, 0644,
+ serialize_policy_show,
+ serialize_policy_store);
static struct attribute *md_default_attrs[] = {
@@ -9671,7 +9663,7 @@ static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
- proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
+ proc_create("mdstat", 0444, NULL, &mdstat_proc_ops);
}
static int __init md_init(void)
@@ -10022,10 +10014,10 @@ static int set_ro(const char *val, const struct kernel_param *kp)
return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
-module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
-module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
-module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
-module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
+module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
+module_param(start_dirty_degraded, int, 0644);
+module_param_call(new_array, add_named_array, NULL, NULL, 0200);
+module_param(create_on_open, bool, 0600);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index cfff345951db..8e19cbfc32ca 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1528,6 +1528,4 @@ ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
}
struct md_sysfs_entry
-ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
- ppl_write_hint_show,
- ppl_write_hint_store);
+ppl_write_hint = __ATTR(ppl_write_hint, 0644, ppl_write_hint_show, ppl_write_hint_store);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f418035da889..b41d0918b914 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6938,9 +6938,8 @@ raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
- raid5_show_stripe_cache_size,
- raid5_store_stripe_cache_size);
+raid5_stripecache_size = __ATTR(stripe_cache_size, 0644,
+ raid5_show_stripe_cache_size, raid5_store_stripe_cache_size);
static ssize_t
raid5_show_rmw_level(struct mddev *mddev, char *page)
@@ -6981,9 +6980,7 @@ raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
- raid5_show_rmw_level,
- raid5_store_rmw_level);
+raid5_rmw_level = __ATTR(rmw_level, 0644, raid5_show_rmw_level, raid5_store_rmw_level);
static ssize_t
raid5_show_stripe_size(struct mddev *mddev, char *page)
@@ -7070,14 +7067,10 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_stripe_size = __ATTR(stripe_size, 0644,
- raid5_show_stripe_size,
- raid5_store_stripe_size);
+raid5_stripe_size = __ATTR(stripe_size, 0644, raid5_show_stripe_size, raid5_store_stripe_size);
#else
static struct md_sysfs_entry
-raid5_stripe_size = __ATTR(stripe_size, 0444,
- raid5_show_stripe_size,
- NULL);
+raid5_stripe_size = __ATTR(stripe_size, 0444, raid5_show_stripe_size, NULL);
#endif
static ssize_t
@@ -7121,8 +7114,7 @@ raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
- S_IRUGO | S_IWUSR,
+raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 0644,
raid5_show_preread_threshold,
raid5_store_preread_threshold);
@@ -7175,9 +7167,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
- raid5_show_skip_copy,
- raid5_store_skip_copy);
+raid5_skip_copy = __ATTR(skip_copy, 0644, raid5_show_skip_copy, raid5_store_skip_copy);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
@@ -7260,9 +7250,8 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
}
static struct md_sysfs_entry
-raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
- raid5_show_group_thread_cnt,
- raid5_store_group_thread_cnt);
+raid5_group_thread_cnt = __ATTR(group_thread_cnt, 0644,
+ raid5_show_group_thread_cnt, raid5_store_group_thread_cnt);
static struct attribute *raid5_attrs[] = {
&raid5_stripecache_size.attr,
--
2.39.2
* [PATCH 25/34] md: remove bogus IS_ENABLED() macro [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (23 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 24/34] md: prefer octal permissions [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 26/34] md autodetect: correct placement of __initdata [WARNING] heinzm
` (9 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
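Drop the local IS_ENABLED() define from the userspace (non-__KERNEL__)
section of pq.h, which checkpatch.pl reports. As an illustrative,
hypothetical sketch of how the kernel macro from <linux/kconfig.h> is
normally used (run_benchmark() is a made-up name):
	if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK))
		run_benchmark();	/* 1 when the config option is enabled */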
Signed-off-by: heinzm <heinzm@redhat.com>
---
include/linux/raid/pq.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 41c525e4c959..9e6171c4e7ed 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -64,7 +64,6 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define subsys_initcall(x)
#define module_exit(x)
-#define IS_ENABLED(x) (x)
#define CONFIG_RAID6_PQ_BENCHMARK 1
#endif /* __KERNEL__ */
--
2.39.2
* [PATCH 26/34] md autodetect: correct placement of __initdata [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (24 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 25/34] md: remove bogus IS_ENABLED() macro [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 27/34] md: prefer using "%s...", __func__ [WARNING] heinzm
` (8 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
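checkpatch.pl asks for section annotations like __initdata to follow the
variable name rather than sit between the type and the name, i.e. in
md-autodetect.c:
	static int raid_noautodetect __initdata;
instead of
	static int __initdata raid_noautodetect;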
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-autodetect.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index ff60c2272919..868be6f32191 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -21,11 +21,11 @@
*/
#ifdef CONFIG_MD_AUTODETECT
-static int __initdata raid_noautodetect;
+static int raid_noautodetect __initdata;
#else
-static int __initdata raid_noautodetect = 1;
+static int raid_noautodetect __initdata = 1;
#endif
-static int __initdata raid_autopart;
+static int raid_autopart __initdata;
static struct md_setup_args {
int minor;
--
2.39.2
* [PATCH 27/34] md: prefer using "%s...", __func__ [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (25 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 26/34] md autodetect: correct placement of __initdata [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 28/34] md pq: adjust __attribute__ [WARNING] heinzm
` (7 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
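Log the function name via the __func__ identifier instead of spelling it out
in the format string, so messages stay accurate if a function is ever
renamed. Taken from the remove_hash() hunk below:
	pr_debug("%s(), stripe %llu\n", __func__, (unsigned long long)sh->sector);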
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-multipath.c | 2 +-
drivers/md/md.c | 4 ++--
drivers/md/raid5.c | 17 +++++++----------
3 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 6cc169abef00..34fd6db61d79 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -45,7 +45,7 @@ static int multipath_map(struct mpconf *conf)
}
rcu_read_unlock();
- pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
+ pr_crit_ratelimited("%s(): no more operational IO paths?\n", __func__);
return (-1);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a495fad762ae..6a4d01efaca5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2487,7 +2487,7 @@ void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev *rdev)
{
- pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
+ pr_debug("md: %s(%pg)\n", __func__, rdev->bdev);
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
@@ -2763,7 +2763,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
mdname(mddev), mddev->in_sync);
if (mddev->queue)
- blk_add_trace_msg(mddev->queue, "md md_update_sb");
+ blk_add_trace_msg(mddev->queue, "md %s", __func__);
rewrite:
md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b41d0918b914..84e4eaa937cf 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -426,8 +426,7 @@ void raid5_release_stripe(struct stripe_head *sh)
static inline void remove_hash(struct stripe_head *sh)
{
- pr_debug("remove_hash(), stripe %llu\n",
- (unsigned long long)sh->sector);
+ pr_debug("%s(), stripe %llu\n", __func__, (unsigned long long)sh->sector);
hlist_del_init(&sh->hash);
}
@@ -436,8 +435,7 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
struct hlist_head *hp = stripe_hash(conf, sh->sector);
- pr_debug("insert_hash(), stripe %llu\n",
- (unsigned long long)sh->sector);
+ pr_debug("%s(), stripe %llu\n", __func__, (unsigned long long)sh->sector);
hlist_add_head(&sh->hash, hp);
}
@@ -587,8 +585,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(stripe_operations_active(sh));
BUG_ON(sh->batch_head);
- pr_debug("init_stripe called, stripe %llu\n",
- (unsigned long long)sector);
+ pr_debug("%s called, stripe %llu\n", __func__, (unsigned long long)sector);
retry:
seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
@@ -624,7 +621,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
{
struct stripe_head *sh;
- pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
+ pr_debug("%s, sector %llu\n", __func__, (unsigned long long)sector);
hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
if (sh->sector == sector && sh->generation == generation)
return sh;
@@ -5537,7 +5534,7 @@ static void raid5_align_endio(struct bio *bi)
return;
}
- pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
+ pr_debug("%s : io error...handing IO for a retry\n", __func__);
add_bio_to_retry(raid_bi, conf);
}
@@ -6783,7 +6780,7 @@ static void raid5d(struct md_thread *thread)
int handled;
struct blk_plug plug;
- pr_debug("+++ raid5d active\n");
+ pr_debug("+++ %s active\n", __func__);
md_check_recovery(mddev);
@@ -6865,7 +6862,7 @@ static void raid5d(struct md_thread *thread)
async_tx_issue_pending_all();
blk_finish_plug(&plug);
- pr_debug("--- raid5d inactive\n");
+ pr_debug("--- %s inactive\n", __func__);
}
static ssize_t
--
2.39.2
* [PATCH 28/34] md pq: adjust __attribute__ [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (26 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 27/34] md: prefer using "%s...", __func__ [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 29/34] md: prefer 'unsigned int' [WARNING] heinzm
` (6 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
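Use the kernel's __aligned() shorthand instead of the open-coded
__attribute__((aligned(...))) form, and drop the local __attribute_const__
and noinline defines, as checkpatch.pl suggests for __attribute__ usage.
The Galois field table declarations become, for example:
	extern const u8 raid6_gfmul[256][256] __aligned(256);
	extern const u8 raid6_gfexp[256] __aligned(256);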
Signed-off-by: heinzm <heinzm@redhat.com>
---
include/linux/raid/pq.h | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 9e6171c4e7ed..da280aae74af 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -44,10 +44,6 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define __exit
-#ifndef __attribute_const__
-# define __attribute_const__ __attribute__((const))
-#endif
-#define noinline __attribute__((noinline))
#define preempt_enable()
#define preempt_disable()
@@ -141,12 +137,12 @@ int raid6_select_algo(void);
#define RAID6_PQ_BAD 3
/* Galois field tables */
-extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
-extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
-extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
-extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
-extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
-extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfmul[256][256] __aligned(256);
+extern const u8 raid6_vgfmul[256][32] __aligned(256);
+extern const u8 raid6_gfexp[256] __aligned(256);
+extern const u8 raid6_gflog[256] __aligned(256);
+extern const u8 raid6_gfinv[256] __aligned(256);
+extern const u8 raid6_gfexi[256] __aligned(256);
/* Recovery routines */
extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
--
2.39.2
* [PATCH 29/34] md: prefer 'unsigned int' [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (27 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 28/34] md pq: adjust __attribute__ [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 30/34] md: prefer kmap_local_page() instead of deprecated kmap_atomic() [WARNING] heinzm
` (5 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
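Spell the bare 'unsigned' type as 'unsigned int', which checkpatch.pl
prefers. Taken from the raid0_make_request() hunk below:
	unsigned int chunk_sects;
	unsigned int sectors;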
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 3 +--
drivers/md/raid0.c | 6 +++---
drivers/md/raid1-10.c | 3 +--
drivers/md/raid1.c | 2 +-
drivers/md/raid5.c | 4 ++--
5 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6a4d01efaca5..9b734720b9c1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2064,8 +2064,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
- unsigned seq;
-
+ unsigned int seq;
retry:
seq = read_seqbegin(&bb->lock);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b4c372c6861b..cedb91f84b69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct strip_zone *zone;
int cnt;
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned blksize = 512;
+ unsigned int blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
@@ -519,8 +519,8 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
sector_t bio_sector;
sector_t sector;
sector_t orig_sector;
- unsigned chunk_sects;
- unsigned sectors;
+ unsigned int chunk_sects;
+ unsigned int sectors;
if (unlikely(bio->bi_opf & REQ_PREFLUSH)
&& md_flush_request(mddev, bio))
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index e61f6cad4e08..8658f474bf2e 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -73,8 +73,7 @@ static inline void resync_get_all_pages(struct resync_pages *rp)
get_page(rp->pages[i]);
}
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
- unsigned idx)
+static inline struct page *resync_fetch_page(struct resync_pages *rp, unsigned int idx)
{
if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
return NULL;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index bd245f41393a..8097e01cd63b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1128,7 +1128,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
struct bio *bio)
{
int size = bio->bi_iter.bi_size;
- unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned int vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
int i = 0;
struct bio *behind_bio = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 84e4eaa937cf..557398bf4c72 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5632,8 +5632,8 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
struct bio *split;
sector_t sector = raid_bio->bi_iter.bi_sector;
- unsigned chunk_sects = mddev->chunk_sectors;
- unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+ unsigned int chunk_sects = mddev->chunk_sectors;
+ unsigned int sectors = chunk_sects - (sector & (chunk_sects-1));
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
--
2.39.2
* [PATCH 30/34] md: prefer kmap_local_page() instead of deprecated kmap_atomic() [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (28 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 29/34] md: prefer 'unsigned int' [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 31/34] md raid5: prefer 'int' instead of 'signed' [WARNING] heinzm
` (4 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
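kmap_atomic() is deprecated; switch the strictly paired, short-lived page
mappings here to kmap_local_page()/kunmap_local(). Taken from the
md_bitmap_update_sb() hunk below:
	sb = kmap_local_page(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	...
	kunmap_local(sb);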
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 42 ++++++++++++++++++++--------------------
drivers/md/md-cluster.c | 10 +++++-----
drivers/md/raid5-cache.c | 16 +++++++--------
3 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 25895ec7d89a..dd752f04e3af 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -450,7 +450,7 @@ void md_bitmap_update_sb(struct bitmap *bitmap)
return;
if (!bitmap->storage.sb_page) /* no superblock */
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */
@@ -469,7 +469,7 @@ void md_bitmap_update_sb(struct bitmap *bitmap)
sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->bitmap_info.space);
- kunmap_atomic(sb);
+ kunmap_local(sb);
write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);
@@ -481,7 +481,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->storage.sb_page)
return;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %u\n", le32_to_cpu(sb->version));
@@ -500,7 +500,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
- kunmap_atomic(sb);
+ kunmap_local(sb);
}
/*
@@ -524,7 +524,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_page->index = 0;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -532,7 +532,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
chunksize = bitmap->mddev->bitmap_info.chunksize;
BUG_ON(!chunksize);
if (!is_power_of_2(chunksize)) {
- kunmap_atomic(sb);
+ kunmap_local(sb);
pr_warn("bitmap chunksize not a power of 2\n");
return -EINVAL;
}
@@ -567,7 +567,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
bitmap->mddev->bitmap_info.nodes = 0;
- kunmap_atomic(sb);
+ kunmap_local(sb);
return 0;
}
@@ -631,7 +631,7 @@ static int md_bitmap_read_sb(struct bitmap *bitmap)
return err;
err = -EINVAL;
- sb = kmap_atomic(sb_page);
+ sb = kmap_local_page(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -698,7 +698,7 @@ static int md_bitmap_read_sb(struct bitmap *bitmap)
err = 0;
out:
- kunmap_atomic(sb);
+ kunmap_local(sb);
if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
/* Assigning chunksize is required for "re_read" */
bitmap->mddev->bitmap_info.chunksize = chunksize;
@@ -939,12 +939,12 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
bit = file_page_offset(&bitmap->storage, chunk);
/* set the bit */
- kaddr = kmap_atomic(page);
+ kaddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set_bit(bit, kaddr);
else
set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
@@ -966,12 +966,12 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
clear_bit(bit, paddr);
else
clear_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
@@ -990,12 +990,12 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
if (!page)
return -EINVAL;
bit = file_page_offset(&bitmap->storage, chunk);
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set = test_bit(bit, paddr);
else
set = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
return set;
}
@@ -1134,10 +1134,10 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
* if bitmap is out of date, dirty the
* whole page and write it out
*/
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
memset(paddr + offset, 0xff,
PAGE_SIZE - offset);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
write_page(bitmap, page, 1);
ret = -EIO;
@@ -1146,12 +1146,12 @@ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
goto err;
}
}
- paddr = kmap_atomic(page);
+ paddr = kmap_local_page(page);
if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
b = test_bit(bit, paddr);
else
b = test_bit_le(bit, paddr);
- kunmap_atomic(paddr);
+ kunmap_local(paddr);
if (b) {
/* if the disk bit is set, set the memory bit */
int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
@@ -1273,10 +1273,10 @@ void md_bitmap_daemon_work(struct mddev *mddev)
bitmap->need_sync = 0;
if (bitmap->storage.filemap) {
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
sb->events_cleared =
cpu_to_le64(bitmap->events_cleared);
- kunmap_atomic(sb);
+ kunmap_local(sb);
set_page_attr(bitmap, 0,
BITMAP_PAGE_NEEDWRITE);
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index e115603ff0d9..ac3a8afc28ee 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1203,9 +1203,9 @@ static int cluster_check_sync_size(struct mddev *mddev)
char str[64];
struct dlm_lock_resource *bm_lockres;
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
my_sync_size = sb->sync_size;
- kunmap_atomic(sb);
+ kunmap_local(sb);
for (i = 0; i < node_num; i++) {
if (i == current_slot)
@@ -1234,15 +1234,15 @@ static int cluster_check_sync_size(struct mddev *mddev)
md_bitmap_update_sb(bitmap);
lockres_free(bm_lockres);
- sb = kmap_atomic(bitmap->storage.sb_page);
+ sb = kmap_local_page(bitmap->storage.sb_page);
if (sync_size == 0)
sync_size = sb->sync_size;
else if (sync_size != sb->sync_size) {
- kunmap_atomic(sb);
+ kunmap_local(sb);
md_bitmap_free(bitmap);
return -1;
}
- kunmap_atomic(sb);
+ kunmap_local(sb);
md_bitmap_free(bitmap);
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index f40ee2101796..3da3e96d61f7 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1021,10 +1021,10 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
/* checksum is already calculated in last run */
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
data_pages = write_disks - parity_pages;
@@ -1983,9 +1983,9 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
u32 checksum;
r5l_recovery_read_page(log, ctx, page, log_offset);
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2386,11 +2386,11 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
payload->size = cpu_to_le32(BLOCK_SECTORS);
payload->location = cpu_to_le64(
raid5_compute_blocknr(sh, i, 0));
- addr = kmap_atomic(dev->page);
+ addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
crc32c_le(log->uuid_checksum, addr,
PAGE_SIZE));
- kunmap_atomic(addr);
+ kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
write_pos = r5l_ring_add(log, write_pos,
@@ -2899,10 +2899,10 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
- addr = kmap_atomic(sh->dev[i].page);
+ addr = kmap_local_page(sh->dev[i].page);
sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
addr, PAGE_SIZE);
- kunmap_atomic(addr);
+ kunmap_local(addr);
pages++;
}
WARN_ON(pages == 0);
--
2.39.2
* [PATCH 31/34] md raid5: prefer 'int' instead of 'signed' [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (29 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 30/34] md: prefer kmap_local_page() instead of deprecated kmap_atomic() [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 32/34] md: prefer kvmalloc_array() with multiply [WARNING] heinzm
` (3 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
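Write the cast type as plain 'int' rather than 'signed', as checkpatch.pl
prefers. From the async_copy_data() hunk below:
	page_offset = (int)(bio->bi_iter.bi_sector - sector) * 512;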
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/raid5.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 557398bf4c72..fb6e0cf727a6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1393,9 +1393,9 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
struct r5conf *conf = sh->raid_conf;
if (bio->bi_iter.bi_sector >= sector)
- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
+ page_offset = (int)(bio->bi_iter.bi_sector - sector) * 512;
else
- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
+ page_offset = (int)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
--
2.39.2
* [PATCH 32/34] md: prefer kvmalloc_array() with multiply [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (30 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 31/34] md raid5: prefer 'int' instead of 'signed' [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 33/34] md: avoid splitting quoted strings [WARNING] heinzm
` (2 subsequent siblings)
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
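Allocate the serial array in rdev_init_serial() with kvmalloc_array()
instead of an open-coded multiplication inside kvmalloc(), so the
count * size calculation is overflow-checked by the allocator:
	serial = kvmalloc_array(serial_nums, sizeof(struct serial_in_rdev), GFP_KERNEL);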
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9b734720b9c1..3d17773e058f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -164,8 +164,7 @@ static int rdev_init_serial(struct md_rdev *rdev)
if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
- GFP_KERNEL);
+ serial = kvmalloc_array(serial_nums, sizeof(struct serial_in_rdev), GFP_KERNEL);
if (!serial)
return -ENOMEM;
--
2.39.2
* [PATCH 33/34] md: avoid splitting quoted strings [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (31 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 32/34] md: prefer kvmalloc_array() with multiply [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-06 21:27 ` [PATCH 34/34] md: avoid return in void functions [WARNING] heinzm
2023-03-07 1:18 ` [PATCH 00/34] address various checkpatch.pl requirements Guoqing Jiang
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
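Keep user-visible format strings on one line, even when that exceeds the
usual line length, so the messages remain grep-able; checkpatch.pl warns
about quoted strings split across lines. From the raid10_run() hunk below:
	pr_err("only near layout is supported by clustered raid10\n");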
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md-bitmap.c | 3 +--
drivers/md/md-cluster.c | 4 ++--
drivers/md/raid0.c | 6 ++----
drivers/md/raid1.c | 3 +--
drivers/md/raid10.c | 3 +--
drivers/md/raid5.c | 17 +++++++----------
6 files changed, 14 insertions(+), 22 deletions(-)
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index dd752f04e3af..0ae7ab8244d7 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2041,8 +2041,7 @@ void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
counts = &bitmap->counts;
chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
- seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
- "%lu%s chunk",
+ seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
counts->pages - counts->missing_pages,
counts->pages,
(counts->pages - counts->missing_pages)
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index ac3a8afc28ee..c2d568194fb4 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -171,8 +171,8 @@ static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode,
&res->lksb, res);
res->sync_locking_done = false;
if (unlikely(ret != 0))
- pr_info("failed to cancel previous lock request "
- "%s return %d\n", res->name, ret);
+ pr_info("failed to cancel previous lock request %s return %d\n",
+ res->name, ret);
return -EPERM;
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index cedb91f84b69..f8897ab4baeb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -90,8 +90,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->bdev->bd_disk->queue));
rdev_for_each(rdev2, mddev) {
- pr_debug("md/raid0:%s: comparing %pg(%llu)"
- " with %pg(%llu)\n",
+ pr_debug("md/raid0:%s: comparing %pg(%llu) with %pg(%llu)\n",
mdname(mddev),
rdev1->bdev,
(unsigned long long)rdev1->sectors,
@@ -227,8 +226,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev->bdev);
continue;
}
- pr_debug("md/raid0:%s: checking %pg ..."
- " contained as device %d\n",
+ pr_debug("md/raid0:%s: checking %pg ... contained as device %d\n",
mdname(mddev),
rdev->bdev, c);
dev[c] = rdev;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8097e01cd63b..2f8fba6e9756 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -544,8 +544,7 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
struct bio *mbio = r1_bio->master_bio;
- pr_debug("raid1: behind end write sectors"
- " %llu-%llu\n",
+ pr_debug("raid1: behind end write sectors %llu-%llu\n",
(unsigned long long) mbio->bi_iter.bi_sector,
(unsigned long long) bio_end_sector(mbio) - 1);
call_bio_endio(r1_bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95609d5e79c..60d7b1af229e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4168,8 +4168,7 @@ static int raid10_run(struct mddev *mddev)
fc = (mddev->layout >> 8) & 255;
fo = mddev->layout & (1<<16);
if (fc > 1 || fo > 0) {
- pr_err("only near layout is supported by clustered"
- " raid10\n");
+ pr_err("only near layout is supported by clustered raid10\n");
goto out_free_conf;
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fb6e0cf727a6..6b87481a9657 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1323,8 +1323,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %d on "
- "replacement disc %d\n",
+ pr_debug("%s: for %llu schedule op %d on replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
rbi->bi_opf, i);
atomic_inc(&sh->count);
@@ -4335,8 +4334,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
if (test_bit(R5_Insync, &dev->flags) &&
test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {
- pr_debug("Read_old block "
- "%d for Reconstruct\n", i);
+ pr_debug("Read_old block %d for Reconstruct\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
@@ -4438,8 +4436,8 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
/* don't try to repair!! */
set_bit(STRIPE_INSYNC, &sh->state);
- pr_warn_ratelimited("%s: mismatch sector in range "
- "%llu-%llu\n", mdname(conf->mddev),
+ pr_warn_ratelimited("%s: mismatch sector in range %llu-%llu\n",
+ mdname(conf->mddev),
(unsigned long long) sh->sector,
(unsigned long long) sh->sector +
RAID5_STRIPE_SECTORS(conf));
@@ -4603,8 +4601,8 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
/* don't try to repair!! */
set_bit(STRIPE_INSYNC, &sh->state);
- pr_warn_ratelimited("%s: mismatch sector in range "
- "%llu-%llu\n", mdname(conf->mddev),
+ pr_warn_ratelimited("%s: mismatch sector in range %llu-%llu\n",
+ mdname(conf->mddev),
(unsigned long long) sh->sector,
(unsigned long long) sh->sector +
RAID5_STRIPE_SECTORS(conf));
@@ -5094,8 +5092,7 @@ static void handle_stripe(struct stripe_head *sh)
set_bit(STRIPE_BIOFILL_RUN, &sh->state);
}
- pr_debug("locked=%d uptodate=%d to_read=%d"
- " to_write=%d failed=%d failed_num=%d,%d\n",
+ pr_debug("locked=%d uptodate=%d to_read=%d to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
s.failed_num[0], s.failed_num[1]);
/*
--
2.39.2
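The hunks above all follow the same checkpatch rule: a user-visible format string should not be split across source lines, even if that pushes the line past the usual length limit, because a split string can no longer be found with grep. A minimal sketch of the before/after shape (hypothetical helper, illustrative names only, not taken from the series):

	/* Assumes the usual md/kernel headers; report_zone() is made up for
	 * illustration and does not exist in drivers/md.
	 */
	static void report_zone(struct mddev *mddev, int zone, sector_t size)
	{
		/* Before: quoted string split to stay under 80 columns */
		pr_debug("md/raid0:%s: zone %d covers"
			 " %llu sectors\n",
			 mdname(mddev), zone, (unsigned long long)size);

		/* After: string kept whole, only the argument list wraps */
		pr_debug("md/raid0:%s: zone %d covers %llu sectors\n",
			 mdname(mddev), zone, (unsigned long long)size);
	}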
* [PATCH 34/34] md: avoid return in void functions [WARNING]
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (32 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 33/34] md: avoid splitting quoted strings [WARNING] heinzm
@ 2023-03-06 21:27 ` heinzm
2023-03-07 1:18 ` [PATCH 00/34] address various checkpatch.pl requirements Guoqing Jiang
34 siblings, 0 replies; 42+ messages in thread
From: heinzm @ 2023-03-06 21:27 UTC (permalink / raw)
To: linux-raid; +Cc: ncroxon, xni, dkeefe
From: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: heinzm <heinzm@redhat.com>
---
drivers/md/md.c | 1 -
drivers/md/raid0.c | 1 -
drivers/md/raid10.c | 1 -
3 files changed, 3 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3d17773e058f..4ea6b685a88c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9154,7 +9154,6 @@ void md_do_sync(struct md_thread *thread)
wake_up(&resync_wait);
md_wakeup_thread(mddev->thread);
- return;
}
EXPORT_SYMBOL_GPL(md_do_sync);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8897ab4baeb..e140fc37df68 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -587,7 +587,6 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
- return;
}
static void *raid0_takeover_raid45(struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 60d7b1af229e..60b1c7b9357f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1266,7 +1266,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
if (mddev->gendisk)
trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), r10_bio->sector);
submit_bio_noacct(read_bio);
- return;
}
static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
--
2.39.2
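The removals above address checkpatch's warning that a bare return as the last statement of a void function is redundant; the fix is simply to delete it. A minimal sketch of the flagged pattern (hypothetical function name, not from the patch):

	/* foo_status() is illustrative only; the trailing return is what
	 * checkpatch flags, and the patch just drops that line.
	 */
	static void foo_status(struct seq_file *seq, struct mddev *mddev)
	{
		seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
		return;		/* redundant in a void function */
	}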
* Re: [PATCH 00/34] address various checkpatch.pl requirements
2023-03-06 21:27 [PATCH 00/34] address various checkpatch.pl requirements heinzm
` (33 preceding siblings ...)
2023-03-06 21:27 ` [PATCH 34/34] md: avoid return in void functions [WARNING] heinzm
@ 2023-03-07 1:18 ` Guoqing Jiang
[not found] ` <CAM23Vxqf-XMdoobeEyyk1MC=PzkWM=5w88jM8R-joxrrT82ukw@mail.gmail.com>
34 siblings, 1 reply; 42+ messages in thread
From: Guoqing Jiang @ 2023-03-07 1:18 UTC (permalink / raw)
To: heinzm, linux-raid; +Cc: ncroxon, xni, dkeefe
On 3/7/23 05:27, heinzm@redhat.com wrote:
> From: heinzm <heinzm@redhat.com>
>
> This patch series addresses checkpatch.pl requirements.
>
> It is grouped into patches addressing errors first, then warnings.
> Each patch fixes flaws in one semantic respect (e.g. fixing spaces).
>
> Series passed upstream regression tests successfully.
>
> Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
> Reviewed-by: Nigel Croxon <ncroxon@redhat.com>
> Reviewed-by: Xiao Ni <xni@redhat.com>
> Tested-by: Nigel Croxon <ncroxon@redhat.com>
> Tested-by: Xiao Ni <xni@redhat.com>
>
> Heinz Mauelshagen (34):
> md: fix required/prohibited spaces [ERROR]
> md: fix 'foo*' and 'foo * bar' [ERROR]
> md: fix EXPORT_SYMBOL() to follow its functions immediately [ERROR]
> md: adjust braces on functions/structures [ERROR]
> md: correct code indent [ERROR]
> md: move trailing statements to next line [ERROR]
> md: consistent spacing around operators [ERROR]
> md: don't initialize statics/globals to 0/false [ERROR]
> md: else should follow close curly brace [ERROR]
> md: remove trailing whitespace [ERROR]
> md: do not use assignment in if condition [ERROR]
> md: add missing blank line after declaration [WARNING]
> md: space prohibited between function and opening parenthesis [WARNING]
> md: prefer seq_put[cs]() to seq_printf() [WARNING]
> md: avoid multiple line dereference [WARNING]
> md: fix block comments [WARNING]
> md: add missing function identifier names to function definition arguments [WARNING]
> md: avoid redundant braces in single line statements [WARNING]
> md: place constant on the right side of a test [WARNING]
> md: avoid pointless filenames in files [WARNING]
> md: avoid useless else after break or return [WARNING]
> md: don't indent labels [WARNING]
> md: fix code indent for conditional statements [WARNING]
> md: prefer octal permissions [WARNING]
> md: remove bogus IS_ENABLED() macro [WARNING]
> md autodetect: correct placement of __initdata [WARNING]
> md: prefer using "%s...", __func__ [WARNING]
> md pq: adjust __attribute__ [WARNING]
> md: prefer 'unsigned int' [WARNING]
> md: prefer kmap_local_page() instead of deprecated kmap_atomic() [WARNING]
> md raid5: prefer 'int' instead of 'signed' [WARNING]
> md: prefer kvmalloc_array() with multiply [WARNING]
> md: avoid splitting quoted strings [WARNING]
> md: avoid return in void functions [WARNING]
Most of them have an empty commit log ...
And I don't think it makes sense to run checkpatch on old code.
Thanks,
Guoqing