#! /bin/bash
# FSQA Test No. 326
#
# This test uses a loopback mount with PUNCH_HOLE support to test
# whether discard operations are working as expected.
#
# It tests both -odiscard and fstrim.
#
# Copyright (C) 2015 SUSE. All Rights Reserved.
# Author: Jeff Mahoney
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#-----------------------------------------------------------------------
#

seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"

tmp=/tmp/$$
status=1	# failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15

loopdev=
tmpdir=
_cleanup()
{
	[ -n "$tmpdir" ] && umount $tmpdir
	[ -n "$loopdev" ] && losetup -d $loopdev
}

# get standard environment, filters and checks
. ./common/rc
. ./common/filter

# real QA test starts here
_need_to_be_root
_supported_fs generic
_supported_os Linux
_require_scratch
_require_fstrim
_require_loop
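
# The test flow, for both -o discard and fstrim, is:
#  1) create a sparse image file on the scratch fs and attach it to a
#     loop device
#  2) mkfs and mount the hosted file system, then fill it with files
#  3) remove the files and let either -o discard or an explicit fstrim
#     release the freed space; the loop driver turns those discards into
#     hole punches in the image file
#  4) check that the image file's allocated size drops back down to
#     little more than the file system's metadata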

MB_BYTES=1048576
GB_BYTES=1073741824

TEST_FS_SIZE_MB=1024
SMFILE_MIN_SIZE=512
SMFILE_MAX_SIZE=$(( 3 * $MB_BYTES ))
LARGEFILE_SIZE_MB=100

# The test methodology is generic, but the values used by each file system
# for the results to make sense may not be.
if [ "$FSTYP" = "btrfs" ]; then
	MAX_REMAINING_MB=10
	TEST_FS_SIZE_MB=10240
	# Larger than a single (1GB) block group
	LARGEFILE_SIZE_MB=$(( 2 * $GB_BYTES / $MB_BYTES ))
elif [ "$FSTYP" = "xfs" ]; then
	MAX_REMAINING_MB=50
else
	# ext4, probably, but sufficiently permissive that it should
	# work anywhere: allow up to 50MB of metadata to remain.
	MAX_REMAINING_MB=50
fi

test_fs_size_bytes=$(( $TEST_FS_SIZE_MB * $MB_BYTES ))

rm -f $seqres.full

_scratch_mkfs &>> $seqres.full
_scratch_mount
_require_fs_space $SCRATCH_MNT $(( $TEST_FS_SIZE_MB * 1024 ))

blocks_used_kb()
{
	blocks=$(( $(stat --format="%b*%B/1024" "$1") ))
	echo "# blocks_used_kb $1 ($2)" >> $seqres.full
	echo "$blocks $1" >> $seqres.full
	echo $blocks
}

random_size()
{
	MIN=$1
	MAX=$2
	echo $(( $MIN + ($MAX - $MIN) * $RANDOM / 32768 ))
}

test_discard()
{
	discard=$1
	files=$2

	tmpfile=$SCRATCH_MNT/testfs.img.$$
	tmpdir=$SCRATCH_MNT/testdir.$$
	testdir=$tmpdir/testdir

	mkdir -p $tmpdir || _fail "!!! failed to create temp mount dir"

	# Create a sparse file to host the file system
	$XFS_IO_PROG -f -t -c "truncate $test_fs_size_bytes" $tmpfile \
		|| _fail "!!! failed to create fs image file"

	opts=""
	if [ "$discard" = "discard" ]; then
		opts="-o discard"
	fi

	loopdev=$(losetup --show -f $tmpfile)
	_mkfs_dev $loopdev &>> $seqres.full

	$MOUNT_PROG $opts $loopdev $tmpdir \
		|| _fail "!!! failed to loopback mount"

	# Record the allocated size of the freshly trimmed, empty file
	# system in $seqres.full for reference.
	$FSTRIM_PROG $tmpdir
	ESIZE="$(blocks_used_kb $tmpfile "empty filesystem")"

	if [ "$files" = "large" ]; then
		count=$(( $TEST_FS_SIZE_MB / $LARGEFILE_SIZE_MB ))
		random=false
	else
		count=$(( $TEST_FS_SIZE_MB * $MB_BYTES / $SMFILE_MAX_SIZE ))
		random=true
	fi

	mkdir -p $testdir
	for ((i = 1; i <= count; i++)); do
		SIZE=$(( $LARGEFILE_SIZE_MB * $MB_BYTES ))
		if $random; then
			SIZE=$(random_size $SMFILE_MIN_SIZE $SMFILE_MAX_SIZE)
		fi
		fn=${seq}_${i}
		$XFS_IO_PROG -f -c "pwrite -S 0xaa 0 $SIZE" $testdir/$fn \
			&> /dev/null
		if [ $? -ne 0 ]; then
			echo "Failed creating file $fn" >> $seqres.full
			break
		fi
	done
	sync

	OSIZE="$(blocks_used_kb $tmpfile "before removing files")"

	rm -rf $testdir

	# Ensure everything's actually on the hosted file system
	if [ "$FSTYP" = "btrfs" ]; then
		_run_btrfs_util_prog filesystem sync $tmpdir
	fi
	sync

	if [ "$discard" = "trim" ]; then
		$FSTRIM_PROG $tmpdir
	fi

	$UMOUNT_PROG $tmpdir
	rmdir $tmpdir
	tmpdir=

	# Sync the backing file system to ensure the hole punches have
	# happened and we can trust the result.
	if [ "$FSTYP" = "btrfs" ]; then
		_run_btrfs_util_prog filesystem sync $SCRATCH_MNT
	fi
	sync

	NSIZE="$(blocks_used_kb $tmpfile "after trim")"

	# Going from a nearly full file system back down to no more than
	# MAX_REMAINING_MB is a good enough test that still accounts for
	# the metadata remaining on different file systems.
	if [ "$NSIZE" -gt $(( $MAX_REMAINING_MB * 1024 )) ]; then
		_fail "TRIM failed: before rm ${OSIZE}kB, after rm ${NSIZE}kB"
	fi

	rm $tmpfile
	losetup -d $loopdev
	loopdev=
}

echo "Testing with -odiscard, many small files"
test_discard discard many

echo "Testing with -odiscard, several large files"
test_discard discard large

echo "Testing with fstrim, many small files"
test_discard trim many

echo "Testing with fstrim, several large files"
test_discard trim large

status=0
exit