diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f375a27..2922e9f 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -395,7 +395,7 @@ async_pq_zero_sum(struct page **blocks, unsigned int offset, int src_cnt,
 	struct dma_async_tx_descriptor *tx = NULL;
 	enum dma_ctrl_flags dma_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	BUG_ON(src_cnt < 2);
+	BUG_ON(src_cnt < 1);
 
 	if (device && src_cnt <= dma_maxpq(device, 0) - 2) {
 		dma_addr_t dma_src[src_cnt + 2];
@@ -486,7 +486,7 @@ async_syndrome_zero_sum(struct page **blocks, unsigned int offset, int src_cnt,
 	struct dma_async_tx_descriptor *tx = NULL;
 	enum dma_ctrl_flags dma_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-	BUG_ON(src_cnt < 2);
+	BUG_ON(src_cnt < 1);
 
 	if (device && src_cnt <= dma_maxpq(device, 0) - 2) {
 		dma_addr_t dma_src[src_cnt + 2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 50cd370..70869bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -183,7 +183,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
-	BUG_ON(src_cnt <= 1);
+	BUG_ON(src_cnt < 1);
 
 	if (chan) {
 		/* run the xor asynchronously */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a76ef52..5ce7090 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4357,8 +4357,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
 		       mdname(mddev), mddev->new_layout);
 		return ERR_PTR(-EIO);
 	}
-	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
-		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+	if (mddev->new_level == 6 && mddev->raid_disks < 3) {
+		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 3)\n",
 		       mdname(mddev), mddev->raid_disks);
 		return ERR_PTR(-EINVAL);
 	}
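
For context, a standalone userspace sketch (not kernel code; BLOCK, xor_blocks() and the sample values are made up for illustration) of why the relaxed assertions are needed: with the new 3-device minimum, a RAID6 array has raid_disks - 2 = 1 data disk, so the P computation degenerates to an XOR over a single source block, which the old BUG_ON(src_cnt < 2) / BUG_ON(src_cnt <= 1) checks would have rejected.

/* Standalone illustration, not kernel code: with the minimum of three
 * RAID6 devices there is exactly one data disk, so computing P is an
 * XOR over a single source -- effectively a copy.  The old assertions
 * would have tripped on this src_cnt == 1 case.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK 16

/* Hypothetical helper: XOR src_cnt source blocks into dest. */
static void xor_blocks(unsigned char *dest,
                       unsigned char srcs[][BLOCK], int src_cnt)
{
        memset(dest, 0, BLOCK);
        for (int i = 0; i < src_cnt; i++)
                for (int j = 0; j < BLOCK; j++)
                        dest[j] ^= srcs[i][j];
}

int main(void)
{
        int raid_disks = 3;              /* new minimum accepted above */
        int data_disks = raid_disks - 2; /* one data disk plus P and Q */
        unsigned char data[1][BLOCK];
        unsigned char p[BLOCK];

        for (int j = 0; j < BLOCK; j++)
                data[0][j] = (unsigned char)j;

        xor_blocks(p, data, data_disks); /* src_cnt == 1 must be legal */
        printf("P equals the lone data block: %s\n",
               memcmp(p, data[0], BLOCK) == 0 ? "yes" : "no");
        return 0;
}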