#!/bin/sh
# Detect printf(3) failure even when it doesn't set the stream error indicator

# Copyright (C) 2007-2008 Free Software Foundation, Inc.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

prog=printf

if test "$VERBOSE" = yes; then
  set -x
  env $prog --version
fi

. $srcdir/test-lib.sh
require_ulimit_

fail=0

# Up to coreutils-6.9, "printf %.Nf 0" would encounter an ENOMEM internal
# error from glibc's printf(3) function whenever N was large relative to
# the size of available memory.  As of Oct 2007, that internal stream-
# related failure was not reflected (for any libc I know of) in the usual
# stream error indicator that is tested by ferror.  The result was that
# while the printf command obviously failed (generated no output),
# it mistakenly exited successfully (exit status of 0).
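#
# An illustrative (hypothetical) transcript of the pre-fix behavior, with N
# chosen larger than the memory available to the process:
#   $ env printf %.999999999f 0; echo "exit status: $?"
#   exit status: 0
# i.e. no formatted output at all, yet a successful exit status.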

# Testing this is tricky, because printf(3) implementations vary widely
# in how well they handle this corner case.
# Most implementations do attempt to allocate N bytes of storage.
# Using the maximum value for N (2^31-1) causes glibc-2.7 to try to
# allocate almost 2^64 bytes, while FreeBSD 6.1's implementation
# correctly outputs almost 2GB worth of 0's, which takes too long.
# We want to test implementations that allocate N bytes, but without
# triggering the above extremes.

# Some other versions of glibc-2.7 have a snprintf function that segfaults
# when an internal (technically unnecessary!) memory allocation fails.

# The compromise is to limit virtual memory to something reasonable,
# and to make an N-byte-allocating printf require more than that, thus
# triggering the printf(3) misbehavior -- which, by the way, is required
# by ISO C99.
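#
# Stripped of the bookkeeping below (fifo, stderr capture, exit status),
# the idea is roughly:
#   ( ulimit -v 10000; env printf %20000000f 0 )
# i.e. cap virtual memory at roughly 10MB while requesting a field width
# whose formatting needs an allocation of roughly 20MB.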

mkfifo fifo || framework_failure

head -c 10 fifo > out &
( ulimit -v 10000; env $prog %20000000f 0 2>err-msg > fifo )
exit=$?
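# At this point "out" holds at most the first 10 bytes that $prog managed to
# write (enough to tell "some output" from "none" without storing the ~20MB
# a successful implementation would produce), err-msg holds any diagnostic,
# and $exit is the command's exit status.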

# Map this longer, and rarer, diagnostic to the common one:
#   printf: cannot perform formatted output: Cannot allocate memory
sed 's/cannot perform .*/write error/' err-msg > k && mv k err-msg
err_msg=$(cat err-msg)
case $err_msg in
  "$prog: write error") diagnostic=y ;;
  '') diagnostic=n ;;
  *) diagnostic=unexpected ;;
esac
n_out=$(wc -c < out)

case $n_out:$diagnostic:$exit in
  10:n:0) ;; # ok, succeeds w/no diagnostic: FreeBSD 6.1
  0:y:1)  ;; # ok, glibc-2.8 and newer, when printf(3) fails with ENOMEM

  *:139)     # segfault; known bug at least in debian unstable's libc6 2.7-11
     echo 1>&2 "$0: bug in snprintf causes low-mem use of printf to segfault"
     fail=77;;

  # 10:y) ;; # Fail: doesn't happen: nobody succeeds with a diagnostic
  # 0:n)  ;; # Fail pre-patch: no output, no diag
  *) fail=1;;
esac

(exit $fail); exit $fail