author     Jim Meyering <meyering@redhat.com>  2012-03-23 10:53:56 +0100
committer  Jim Meyering <meyering@redhat.com>  2012-03-23 12:47:52 +0100
commit     4b101ccd176eb3951bfbab717a0a3b5e2c4d19ef (patch)
tree       1a3b5c74305fa49b7b628867b1c47da7607f5364 /tests/dd
parent     a04110e5680b618489f05fd8d2b83571c3342590 (diff)
download   coreutils-4b101ccd176eb3951bfbab717a0a3b5e2c4d19ef.tar.xz
tests: skip part of dd/sparse on some file systems
* tests/dd/sparse: The last two parts of this test would fail due to
the underlying file system, at least on Solaris 10 with NFS.  That
file system would report that a 3MiB file was occupying <= 1KiB of
space for nearly 50 seconds after creation.
Improved-by: Bernhard Voelker
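For context, a minimal, hypothetical sketch (not part of the commit) of how the
delayed allocation reporting can be observed by hand.  It assumes a GNU userland
and an affected NFS mount; the file name probe.bin is illustrative, and du -k is
the same probe the new kb_alloc helper below relies on:

# Create a 3MiB file of random data, then ask how much space it occupies.
dd if=/dev/urandom of=probe.bin bs=1M count=3 iflag=fullblock
stat -c 'apparent size: %s bytes' probe.bin
du -k probe.bin    # on Solaris 10 over NFS this may report <= 1 KiB at first
sleep 60
du -k probe.bin    # the reported allocation eventually catches up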
Diffstat (limited to 'tests/dd')
-rwxr-xr-x  tests/dd/sparse | 54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/tests/dd/sparse b/tests/dd/sparse
index feb94471b..35ddda91e 100755
--- a/tests/dd/sparse
+++ b/tests/dd/sparse
@@ -42,35 +42,29 @@ compare exp out || fail=1
 dd if=file.in bs=1 conv=sparse | cat > file.out
 cmp file.in file.out || fail=1
-# Setup for block size tests
-dd if=/dev/urandom of=file.in bs=1M count=1
-truncate -s+1M file.in
-dd if=/dev/urandom of=file.in bs=1M count=1 conv=notrunc oflag=append
-
-# Note the block allocations below are usually equal,
-# but can vary by a file system block due to alignment,
-# which was seen on XFS at least. Also on various BSDs
-# the sparse granularity was up to 8 file system blocks
-# (16KiB for the tested systems), causing this to be the
-# minimum accuracy we can support.
-alloc_equal() {
- # 8 and 512 below are related, so hardcode sector_size for now
- # : ${sector_size:=$(stat -c "%B" "$1")}
- : ${sectors_per_block:=$(expr $(stat -f -c '%S' "$1") / 512)}
- : ${min_sectors_per_sparse_block:=$(expr $sectors_per_block '*' 8)}
- alloc_diff=$(expr $(stat -c %b "$1") - $(stat -c %b "$2"))
- alloc_diff=$(echo $alloc_diff | tr -d -- -) # abs()
- test $alloc_diff -le $min_sectors_per_sparse_block
-}
-
-# Ensure NUL blocks smaller than the block size are not made sparse
-dd if=file.in of=file.out bs=2M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out && fail=1
-
-# Ensure NUL blocks >= block size are made sparse
-dd if=file.in of=file.out bs=1M conv=sparse
-test $(stat -c %s file.in) = $(stat -c %s file.out) || fail=1
-alloc_equal file.in file.out || fail=1
+# Setup for block size tests: create a 3MiB file with a 1MiB
+# stretch of NUL bytes in the middle.
+rm -f file.in
+dd if=/dev/urandom of=file.in bs=1M count=3 iflag=fullblock || fail=1
+dd if=/dev/zero of=file.in bs=1M count=1 seek=1 conv=notrunc || fail=1
+
+kb_alloc() { du -k "$1"|cut -f1; }
+
+# If our just-created input file appears to be too small,
+# skip the remaining tests. On at least Solaris 10 with NFS,
+# file.in is reported to occupy <= 1KiB for about 50 seconds
+# after its creation.
+if test $(kb_alloc file.in) -gt 3000; then
+
+ # Ensure NUL blocks smaller than the block size are not made sparse.
+ # Here, with a 2MiB block size, dd's conv=sparse must *not* introduce a hole.
+ dd if=file.in of=file.out bs=2M conv=sparse
+ test 2500 -lt $(kb_alloc file.out) || fail=1
+
+ # Ensure that this 1MiB string of NULs *is* converted to a hole.
+ dd if=file.in of=file.out bs=1M conv=sparse
+ test $(kb_alloc file.out) -lt 2500 || fail=1
+
+fi
 Exit $fail
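To try the underlying dd behavior the retained checks exercise, here is a
standalone sketch, assuming GNU dd and a file system that reports allocation
promptly; the file names f.in, f.big, and f.small are illustrative:

# Build a 3MiB file whose middle 1MiB is entirely NUL bytes.
dd if=/dev/urandom of=f.in bs=1M count=3 iflag=fullblock
dd if=/dev/zero of=f.in bs=1M count=1 seek=1 conv=notrunc

# With bs=2M no read block is all-NUL (the first 2MiB block mixes random
# and zero bytes), so conv=sparse writes everything: ~3MiB stays allocated.
dd if=f.in of=f.big bs=2M conv=sparse
du -k f.big

# With bs=1M the middle read block is all-NUL, so conv=sparse seeks over
# it instead of writing, leaving a ~1MiB hole: roughly 2MiB allocated.
dd if=f.in of=f.small bs=1M conv=sparse
du -k f.small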