From ebc63d33ea2763165c6773de545825bb2b2d4264 Mon Sep 17 00:00:00 2001
From: Jim Meyering
Date: Wed, 24 Aug 2011 10:36:25 +0200
Subject: tests: adjust the new, very expensive rm test to be less expensive

* tests/rm/4-million-entry-dir: Create only 200,000 files, rather
than 4 million.  The latter was overkill, and was too likely to fail
due to inode exhaustion.  Not everyone is using btrfs yet.
Now that this test doesn't take so long, label it as merely
"expensive", rather than "very expensive".
Thanks to Bernhard Voelker for pointing out the risk of inode
exhaustion.
---
 tests/rm/4-million-entry-dir | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/tests/rm/4-million-entry-dir b/tests/rm/4-million-entry-dir
index 23130a68e..44855cf26 100755
--- a/tests/rm/4-million-entry-dir
+++ b/tests/rm/4-million-entry-dir
@@ -1,5 +1,6 @@
 #!/bin/sh
-# in coreutils-8.12, this would have required ~1GB of memory
+# In coreutils-8.12, rm, du, chmod, etc. would use too much memory
+# when processing a directory with many entries (as in > 100,000).

 # Copyright (C) 2011 Free Software Foundation, Inc.

@@ -17,19 +18,21 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

 . "${srcdir=.}/init.sh"; path_prepend_ ../src
-print_ver_ rm
+print_ver_ rm du

-very_expensive_
+expensive_

-# Put 4M files in a directory.
+# With many files in a single directory...
 mkdir d && cd d || framework_failure_
-seq 4000000|xargs touch || framework_failure_
+seq 200000|xargs touch || framework_failure_
 cd ..

-# Restricted to 50MB, rm from coreutils-8.12 would fail with a
-# diagnostic like "rm: fts_read failed: Cannot allocate memory".
-ulimit -v 50000
+# Restricted to 40MB, each of these commands from coreutils-8.12 would
+# fail with a diagnostic like "rm: fts_read failed: Cannot allocate memory".
+ulimit -v 40000
+du -sh d || fail=1
+chmod -R 700 d || fail=1
 rm -rf d || fail=1

 Exit $fail
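For reference, here is a sketch of how tests/rm/4-million-entry-dir reads once this
patch is applied, reconstructed from the hunks above.  The license boilerplate between
the two hunks is elided, and print_ver_, expensive_, framework_failure_, and Exit are
helpers made available by the sourced init.sh, so this is illustrative rather than a
standalone script.

    #!/bin/sh
    # In coreutils-8.12, rm, du, chmod, etc. would use too much memory
    # when processing a directory with many entries (as in > 100,000).

    # ... GPL license boilerplate elided ...

    . "${srcdir=.}/init.sh"; path_prepend_ ../src
    print_ver_ rm du

    expensive_

    # Create many files in a single directory.
    mkdir d && cd d || framework_failure_
    seq 200000|xargs touch || framework_failure_
    cd ..

    # Restricted to 40MB, each of these commands from coreutils-8.12 would
    # fail with a diagnostic like "rm: fts_read failed: Cannot allocate memory".
    ulimit -v 40000
    du -sh d || fail=1
    chmod -R 700 d || fail=1
    rm -rf d || fail=1

    Exit $fail

Note that ulimit -v takes its argument in 1024-byte units in POSIX-style shells, so
"ulimit -v 40000" caps the address space at roughly 40 MB, which is enough for the
fixed tools but not for the coreutils-8.12 fts-based traversal of a 200,000-entry
directory.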