1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Revert r29986: "Imported minitest 2.0.0 r5952"

This breaks test-all:
* two test-all errors (test_run_passing and test_run_failing_filtered).
* -v option to test-all is ignored

In addition, please describe a summary of the change when you
import from an external repository.

And, RUN test-all BEFORE COMMIT.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@29988 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
naruse 2010-12-01 01:12:31 +00:00
parent 22b2c63855
commit 54fd6de064
10 changed files with 187 additions and 797 deletions

View file

@ -5,11 +5,6 @@ Wed Dec 1 09:28:27 2010 NARUSE, Yui <naruse@ruby-lang.org>
info if s/he has a debug package for ruby.
patched by Shinichiro Hamaji [ruby-dev:42655]
Wed Dec 1 07:42:42 2010 Ryan Davis <ryand-ruby@zenspider.com>
* lib/minitest/*.rb: Imported minitest 2.0.0 r5952.
* test/minitest/*.rb: ditto.
Wed Dec 1 01:29:15 2010 NARUSE, Yui <naruse@ruby-lang.org>
* string.c (rb_str_inspect): inspect as a dummy encoding string

View file

@ -1,330 +0,0 @@
############################################################
# This file is imported from a different project.
# DO NOT make modifications in this repo.
# File a patch instead and assign it to Ryan Davis
############################################################
require 'minitest/unit'
require 'minitest/spec'
##
# Benchmarking extensions for MiniTest::Unit: adds a benchmark runner
# and curve-fitting assertions to TestCase.
class MiniTest::Unit
  # Pluggable runner object.  NOTE(review): only the accessor is
  # visible in this file — confirm semantics against the driver code.
  attr_accessor :runner

  ##
  # Runs all benchmark suites, analogous to running the test suites.
  def run_benchmarks
    _run_anything :benchmark
  end

  ##
  # Header printed before a benchmark suite: the suite name followed
  # by its bench_range values, tab-separated (spreadsheet friendly).
  def benchmark_suite_header suite
    "\n#{suite}\t#{suite.bench_range.join("\t")}"
  end

  class TestCase
    ##
    # Returns a set of ranges stepped exponentially from +min+ to
    # +max+ by powers of +base+. Eg:
    #
    #   bench_exp(2, 16, 2) # => [2, 4, 8, 16]
    def self.bench_exp min, max, base = 10
      min = (Math.log10(min) / Math.log10(base)).to_i
      max = (Math.log10(max) / Math.log10(base)).to_i
      (min..max).map { |m| base ** m }.to_a
    end

    ##
    # Returns a set of ranges stepped linearly from +min+ to +max+ by
    # +step+. Eg:
    #
    #   bench_linear(20, 40, 10) # => [20, 30, 40]
    def self.bench_linear min, max, step = 10
      (min..max).step(step).to_a
    rescue LocalJumpError # 1.8.6: Range#step had no block-less form
      r = []; (min..max).step(step) { |n| r << n }; r
    end

    ##
    # Returns the benchmark methods (methods that start with bench_)
    # for that class.
    def self.benchmark_methods # :nodoc:
      public_instance_methods(true).grep(/^bench_/).map { |m| m.to_s }.sort
    end

    ##
    # Returns all test suites that have benchmark methods.
    def self.benchmark_suites
      TestCase.test_suites.reject { |s| s.benchmark_methods.empty? }
    end

    ##
    # Specifies the ranges used for benchmarking for that class.
    # Defaults to exponential growth from 1 to 10k by powers of 10.
    # Override if you need different ranges for your benchmarks.
    #
    # See also: ::bench_exp and ::bench_linear.
    def self.bench_range
      bench_exp 1, 10_000
    end

    ##
    # Runs the given +work+, gathering the times of each run. Range
    # and times are then passed to a given +validation+ proc. Outputs
    # the benchmark name and times in tab-separated format, making it
    # easy to paste into a spreadsheet for graphing or further
    # analysis.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     validation = proc { |x, y| ... }
    #     assert_performance validation do |x|
    #       @obj.algorithm
    #     end
    #   end
    def assert_performance validation, &work
      range = self.class.bench_range
      io.print "#{__name__}"
      times = []
      range.each do |x|
        GC.start # keep GC pauses from polluting the timed region
        t0 = Time.now
        instance_exec(x, &work)
        t = Time.now - t0
        io.print "\t%9.6f" % t
        times << t
      end
      io.puts
      validation[range, times]
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a constant rate (eg, linear slope == 0) within a given error
    # +threshold+.
    #
    # Fit is calculated by #fit_constant.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_constant 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end
    def assert_performance_constant threshold = 0.99, &work
      validation = proc do |range, times|
        a, b, rr = fit_linear range, times
        # constant time means the slope b is (near) zero
        assert_in_delta 0, b, 1 - threshold
        [a, b, rr]
      end
      assert_performance validation, &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match an exponential curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_exponential.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_exponential 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end
    def assert_performance_exponential threshold = 0.99, &work
      assert_performance validation_for_fit(:exponential, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a straight line within a given error +threshold+.
    #
    # Fit is calculated by #fit_linear.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_linear 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end
    def assert_performance_linear threshold = 0.99, &work
      assert_performance validation_for_fit(:linear, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered curve
    # fit to match a power curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_power.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_power 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end
    def assert_performance_power threshold = 0.99, &work
      assert_performance validation_for_fit(:power, threshold), &work
    end

    ##
    # Takes an array of x/y pairs and calculates the general R^2 value.
    #
    # See: http://en.wikipedia.org/wiki/Coefficient_of_determination
    def fit_error xys
      y_bar = sigma(xys) { |x, y| y } / xys.size.to_f
      ss_tot = sigma(xys) { |x, y| (y - y_bar) ** 2 }
      ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 }
      1 - (ss_err / ss_tot)
    end

    ##
    # To fit a functional form: y = ae^(bx).
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html
    def fit_exponential xs, ys
      n = xs.size
      xys = xs.zip(ys)
      sxlny = sigma(xys) { |x,y| x * Math.log(y) }
      slny = sigma(xys) { |x,y| Math.log(y) }
      sx2 = sigma(xys) { |x,y| x * x }
      sx = sigma xs
      c = n * sx2 - sx ** 2
      a = (slny * sx2 - sx * sxlny) / c
      b = ( n * sxlny - sx * slny ) / c
      # a was fit in log space, so exponentiate it back out
      return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) }
    end

    ##
    # Fits the functional form: a + bx.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFitting.html
    def fit_linear xs, ys
      n = xs.size
      xys = xs.zip(ys)
      sx = sigma xs
      sy = sigma ys
      sx2 = sigma(xs) { |x| x ** 2 }
      sxy = sigma(xys) { |x,y| x * y }
      c = n * sx2 - sx**2
      a = (sy * sx2 - sx * sxy) / c
      b = ( n * sxy - sx * sy ) / c
      return a, b, fit_error(xys) { |x| a + b * x }
    end

    ##
    # To fit a functional form: y = ax^b.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    def fit_power xs, ys
      n = xs.size
      xys = xs.zip(ys)
      slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) }
      slnx = sigma(xs) { |x | Math.log(x) }
      slny = sigma(ys) { | y| Math.log(y) }
      slnx2 = sigma(xs) { |x | Math.log(x) ** 2 }
      b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2);
      a = (slny - b * slnx) / n
      # both coefficients were fit in log-log space
      return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) }
    end

    ##
    # Enumerates over +enum+ mapping +block+ if given, returning the
    # sum of the result. Eg:
    #
    #   sigma([1, 2, 3])                # => 1 + 2 + 3 => 6
    #   sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14
    def sigma enum, &block
      enum = enum.map(&block) if block
      enum.inject { |sum, n| sum + n }
    end

    ##
    # Returns a proc that calls the specified fit method and asserts
    # that the error is within a tolerable threshold.
    def validation_for_fit msg, threshold
      proc do |range, times|
        a, b, rr = send "fit_#{msg}", range, times
        assert_operator rr, :>=, threshold
        [a, b, rr]
      end
    end
  end
end
##
# Spec-style DSL for defining benchmarks inside a describe block.
class MiniTest::Spec
  ##
  # Defines a benchmark method for this spec.  Non-word characters in
  # +name+ are collapsed to underscores to form the method name.
  def self.bench name, &block
    bench_name = "bench_#{name.gsub(/\W+/, '_')}"
    define_method bench_name, &block
  end

  ##
  # Overrides ::bench_range for this spec class with the given block.
  def self.bench_range &block
    singleton = (class << self; self; end)
    singleton.send :define_method, "bench_range", &block
  end

  ##
  # Defines a benchmark named +name+ asserting the timings of +work+
  # fit a straight line within +threshold+.
  def self.bench_performance_linear name, threshold = 0.9, &work
    bench(name) { assert_performance_linear threshold, &work }
  end

  ##
  # Defines a benchmark named +name+ asserting the timings of +work+
  # stay constant within +threshold+.
  def self.bench_performance_constant name, threshold = 0.99, &work
    bench(name) { assert_performance_constant threshold, &work }
  end

  ##
  # Defines a benchmark named +name+ asserting the timings of +work+
  # fit an exponential curve within +threshold+.
  def self.bench_performance_exponential name, threshold = 0.99, &work
    bench(name) { assert_performance_exponential threshold, &work }
  end
end

View file

@ -14,7 +14,14 @@ module MiniTest
end
def expect(name, retval, args=[])
n, r = name, retval # for the closure below
@expected_calls[name] = { :retval => retval, :args => args }
self.class.__send__ :remove_method, name if respond_to? name
self.class.__send__(:define_method, name) { |*x|
raise ArgumentError unless @expected_calls[n][:args].size == x.size
@actual_calls[n] << { :retval => r, :args => x }
retval
}
self
end
@ -27,19 +34,5 @@ module MiniTest
end
true
end
def method_missing(sym, *args)
raise NoMethodError unless @expected_calls.has_key?(sym)
raise ArgumentError unless @expected_calls[sym][:args].size == args.size
retval = @expected_calls[sym][:retval]
@actual_calls[sym] << { :retval => retval, :args => args }
retval
end
alias :original_respond_to? :respond_to?
def respond_to?(sym)
return true if @expected_calls.has_key?(sym)
return original_respond_to?(sym)
end
end
end

View file

@ -1,41 +0,0 @@
############################################################
# This file is imported from a different project.
# DO NOT make modifications in this repo.
# File a patch instead and assign it to Ryan Davis
############################################################
require "minitest/unit"
##
# Show your testing pride!
##
# Show your testing pride!
#
# Wraps an output IO and colorizes test-progress output: passing dots
# cycle through ANSI colors, errors and failures show white on red.
class PrideIO
  # The wrapped output stream.
  attr_reader :io

  # ANSI foreground color codes (red through cyan)
  # stolen from /System/Library/Perl/5.10.0/Term/ANSIColor.pm
  COLORS = (31..36).to_a
  CHARS = ["*"]

  def initialize io
    @io = io
    @colors = COLORS.cycle
    @chars = CHARS.cycle
  end

  # Colorize progress markers; pass anything else through untouched.
  def print o
    if o == "."
      # each pass gets the next color/char in the cycle
      io.print "\e[#{@colors.next}m#{@chars.next}\e[0m"
    elsif o == "E" || o == "F"
      # errors/failures: white text on red background
      io.print "\e[41m\e[37m#{o}\e[0m"
    else
      io.print o
    end
  end

  # Delegate everything else (puts, sync=, ...) to the wrapped IO.
  def method_missing msg, *args
    io.send(msg, *args)
  end
end
# Install the colorizing wrapper around whatever stream is current.
MiniTest::Unit.output = PrideIO.new(MiniTest::Unit.output)

View file

@ -66,13 +66,10 @@ module Kernel
def describe desc, &block
stack = MiniTest::Spec.describe_stack
name = [stack.last, desc].compact.join("::")
cls = Class.new(stack.last || MiniTest::Spec)
# :stopdoc:
# omg this sucks
(class << cls; self; end).send(:define_method, :to_s) { name }
# :startdoc:
name = desc.to_s.split(/\W+/).map { |s| s.capitalize }.join + "Spec"
prev = stack.last
name = "#{prev == MiniTest::Spec ? nil : prev}::#{name}"
cls = Object.class_eval "class #{name} < #{prev}; end; #{name}"
cls.nuke_test_methods!
@ -99,7 +96,7 @@ end
class MiniTest::Spec < MiniTest::Unit::TestCase
@@describe_stack = []
@@describe_stack = [MiniTest::Spec]
def self.describe_stack # :nodoc:
@@describe_stack
end

View file

@ -8,8 +8,6 @@ require 'optparse'
##
# Minimal (mostly drop-in) replacement for test-unit.
#
# :include: README.txt
module MiniTest
@ -29,8 +27,7 @@ module MiniTest
require 'pathname'
pwd = Pathname.new Dir.pwd
pn = Pathname.new File.expand_path(__FILE__)
relpath = pn.relative_path_from(pwd) rescue pn
pn = File.join ".", relpath unless pn.relative?
pn = File.join(".", pn.relative_path_from(pwd)) unless pn.relative?
pn.to_s
else # assume both are expanded
__FILE__
@ -43,19 +40,13 @@ module MiniTest
return ["No backtrace"] unless bt
new_bt = []
unless $DEBUG then
bt.each do |line|
break if line.rindex MINI_DIR, 0
break if line.rindex(MINI_DIR, 0)
new_bt << line
end
new_bt = bt.reject { |line| line.rindex MINI_DIR, 0 } if new_bt.empty?
new_bt = bt.reject { |line| line.rindex(MINI_DIR, 0) } if new_bt.empty?
new_bt = bt.dup if new_bt.empty?
else
new_bt = bt.dup
end
new_bt
end
@ -71,7 +62,7 @@ module MiniTest
def mu_pp obj
s = obj.inspect
s = s.force_encoding Encoding.default_external if defined? Encoding
s = s.force_encoding(Encoding.default_external) if defined? Encoding
s
end
@ -100,14 +91,15 @@ module MiniTest
# Fails unless the block returns a true value.
def assert_block msg = nil
assert yield, "Expected block to return true value."
msg = message(msg) { "Expected block to return true value" }
assert yield, msg
end
##
# Fails unless +obj+ is empty.
def assert_empty obj, msg = nil
msg = message(msg) { "Expected #{mu_pp(obj)} to be empty" }
msg = message(msg) { "Expected #{obj.inspect} to be empty" }
assert_respond_to obj, :empty?
assert obj.empty?, msg
end
@ -520,61 +512,27 @@ module MiniTest
end
class Unit
VERSION = "2.0.0" # :nodoc:
VERSION = "1.7.2" # :nodoc:
attr_accessor :report, :failures, :errors, :skips # :nodoc:
attr_accessor :test_count, :assertion_count # :nodoc:
attr_accessor :start_time # :nodoc:
attr_accessor :help # :nodoc:
attr_accessor :verbose # :nodoc:
attr_writer :options # :nodoc:
def options
@options ||= {}
end
@@installed_at_exit ||= false
@@out = $stdout
##
# A simple hook allowing you to run a block of code after the
# tests are done. Eg:
#
# MiniTest::Unit.after_tests { p $debugging_info }
def self.after_tests
at_exit { at_exit { yield } }
end
##
# Registers MiniTest::Unit to run tests at process exit
def self.autorun
at_exit {
next if $! # don't run if there was an exception
exit_code = MiniTest::Unit.new.run ARGV
exit_code = MiniTest::Unit.new.run(ARGV)
exit false if exit_code && exit_code != 0
} unless @@installed_at_exit
@@installed_at_exit = true
end
##
# Returns the stream to use for output.
def self.output
@@out
end
##
# Returns the stream to use for output.
#
# DEPRECATED: use ::output instead.
def self.out
warn "::out deprecated, use ::output instead." if $VERBOSE
output
end
##
# Sets MiniTest::Unit to write output to +stream+. $stdout is the default
# output
@ -583,93 +541,6 @@ module MiniTest
@@out = stream
end
##
# Return all plugins' run methods (methods that start with "run_").
def self.plugins
@@plugins ||= (["run_tests"] +
public_instance_methods(false).
grep(/^run_/).map { |s| s.to_s }).uniq
end
def output
self.class.output
end
def puts *a # :nodoc:
output.puts(*a)
end
def print *a # :nodoc:
output.print(*a)
end
def _run_anything type
suites = TestCase.send "#{type}_suites"
return if suites.empty?
start = Time.now
puts
puts "# Running #{type}s:"
puts
@test_count, @assertion_count = 0, 0
sync = output.respond_to? :"sync=" # stupid emacs
old_sync, output.sync = output.sync, true if sync
results = _run_suites suites, type
@test_count = results.inject(0) { |sum, (tc, ac)| sum + tc }
@assertion_count = results.inject(0) { |sum, (tc, ac)| sum + ac }
output.sync = old_sync if sync
t = Time.now - start
puts
puts
puts "Finished #{type}s in %.6fs, %.4f tests/s, %.4f assertions/s." %
[t, test_count / t, assertion_count / t]
report.each_with_index do |msg, i|
puts "\n%3d) %s" % [i + 1, msg]
end
puts
status
end
def _run_suites suites, type
suites.map { |suite| _run_suite suite, type }
end
def _run_suite suite, type
header = "#{type}_suite_header"
puts send(header, suite) if respond_to? header
filter = options[:filter] || '/./'
filter = Regexp.new $1 if filter =~ /\/(.*)\//
assertions = suite.send("#{type}_methods").grep(filter).map { |method|
inst = suite.new method
inst._assertions = 0
start_time = Time.now
result = inst.run self
time = Time.now - start_time
print "#{suite}##{method} = %.2f s = " % time if @verbose
print result
puts if @verbose
inst._assertions
}
return assertions.size, assertions.inject(0) { |sum, n| sum + n }
end
def location e # :nodoc:
last_before_assertion = ""
e.backtrace.reverse_each do |s|
@ -693,7 +564,7 @@ module MiniTest
"Failure:\n#{meth}(#{klass}) [#{location e}]:\n#{e.message}\n"
else
@errors += 1
bt = MiniTest::filter_backtrace(e.backtrace).join "\n "
bt = MiniTest::filter_backtrace(e.backtrace).join("\n ")
"Error:\n#{meth}(#{klass}):\n#{e.class}: #{e.message}\n #{bt}\n"
end
@report << e
@ -708,7 +579,6 @@ module MiniTest
def process_args args = []
options = {}
orig_args = args.dup
OptionParser.new do |opts|
opts.banner = 'minitest options:'
@ -731,21 +601,9 @@ module MiniTest
options[:filter] = a
end
opts.parse! args
orig_args -= args
opts.parse args
end
unless options[:seed] then
srand
options[:seed] = srand % 0xFFFF
orig_args << "--seed" << options[:seed].to_s
end
srand options[:seed]
self.verbose = options[:verbose]
@help = orig_args.map { |s| s =~ /[\s|&<>$()]/ ? s.inspect : s }.join " "
options
end
@ -753,40 +611,89 @@ module MiniTest
# Top level driver, controls all output and filtering.
def run args = []
self.options = process_args args
options = process_args args
puts "Run options: #{help}"
@verbose = options[:verbose]
self.class.plugins.each do |plugin|
send plugin
break unless report.empty?
filter = options[:filter] || '/./'
filter = Regexp.new $1 if filter and filter =~ /\/(.*)\//
seed = options[:seed]
unless seed then
srand
seed = srand % 0xFFFF
end
srand seed
help = ["--seed", seed]
help.push "--verbose" if @verbose
help.push("--name", options[:filter].inspect) if options[:filter]
@@out.puts "Test run options: #{help.join(" ")}"
@@out.puts
@@out.puts "Loaded suite #{$0.sub(/\.rb$/, '')}\nStarted"
start = Time.now
run_test_suites filter
@@out.puts
@@out.puts "Finished in #{'%.6f' % (Time.now - start)} seconds."
@report.each_with_index do |msg, i|
@@out.puts "\n%3d) %s" % [i + 1, msg]
end
@@out.puts
status
@@out.puts
@@out.puts "Test run options: #{help.join(" ")}"
return failures + errors if @test_count > 0 # or return nil...
rescue Interrupt
abort 'Interrupted'
end
##
# Runs test suites matching +filter+.
def run_tests
_run_anything :test
end
##
# Writes status to +io+
def status io = self.output
def status io = @@out
format = "%d tests, %d assertions, %d failures, %d errors, %d skips"
io.puts format % [test_count, assertion_count, failures, errors, skips]
end
##
# Runs test suites matching +filter+
def run_test_suites filter = /./
@test_count, @assertion_count = 0, 0
old_sync, @@out.sync = @@out.sync, true if @@out.respond_to? :sync=
TestCase.test_suites.each do |suite|
suite.test_methods.grep(filter).each do |test|
inst = suite.new test
inst._assertions = 0
@@out.print "#{suite}##{test}: " if @verbose
@start_time = Time.now
result = inst.run(self)
@@out.print "%.2f s: " % (Time.now - @start_time) if @verbose
@@out.print result
@@out.puts if @verbose
@test_count += 1
@assertion_count += inst._assertions
end
end
@@out.sync = old_sync if @@out.respond_to? :sync=
[@test_count, @assertion_count]
end
##
# Subclass TestCase to create your own tests. Typically you'll want a
# TestCase subclass per implementation class.
#
# See MiniTest::Assertions
class TestCase
attr_reader :__name__ # :nodoc:
@ -800,31 +707,30 @@ module MiniTest
# Runs the tests reporting the status to +runner+
def run runner
trap "INFO" do
time = Time.now - runner.start_time
warn "%s#%s %.2fs" % [self.class, self.__name__, time]
trap 'INFO' do
warn '%s#%s %.2fs' % [self.class, self.__name__,
(Time.now - runner.start_time)]
runner.status $stderr
end if SUPPORTS_INFO_SIGNAL
result = ""
result = '.'
begin
@passed = nil
self.setup
self.__send__ self.__name__
result = "." unless io?
@passed = true
rescue *PASSTHROUGH_EXCEPTIONS
raise
rescue Exception => e
@passed = false
result = runner.puke self.class, self.__name__, e
result = runner.puke(self.class, self.__name__, e)
ensure
begin
self.teardown
rescue *PASSTHROUGH_EXCEPTIONS
raise
rescue Exception => e
result = runner.puke self.class, self.__name__, e
result = runner.puke(self.class, self.__name__, e)
end
trap 'INFO', 'DEFAULT' if SUPPORTS_INFO_SIGNAL
end
@ -833,19 +739,9 @@ module MiniTest
def initialize name # :nodoc:
@__name__ = name
@__io__ = nil
@passed = nil
end
def io
@__io__ = true
MiniTest::Unit.output
end
def io?
@__io__
end
def self.reset # :nodoc:
@@test_suites = {}
end
@ -866,7 +762,7 @@ module MiniTest
end
def self.test_suites # :nodoc:
@@test_suites.keys.sort_by { |ts| ts.name.to_s }
@@test_suites.keys.sort_by { |ts| ts.name }
end
def self.test_methods # :nodoc:
@ -875,7 +771,7 @@ module MiniTest
case self.test_order
when :random then
max = methods.size
methods.sort.sort_by { rand max }
methods.sort.sort_by { rand(max) }
when :alpha, :sorted then
methods.sort
else
@ -917,3 +813,4 @@ if $DEBUG then
end
end
end

View file

@ -9,39 +9,39 @@ require 'minitest/unit'
MiniTest::Unit.autorun
class TestMiniTestMock < MiniTest::Unit::TestCase
class TestMiniMock < MiniTest::Unit::TestCase
def setup
@mock = MiniTest::Mock.new.expect(:foo, nil)
@mock.expect(:meaning_of_life, 42)
end
def test_create_stub_method
def test_should_create_stub_method
assert_nil @mock.foo
end
def test_allow_return_value_specification
def test_should_allow_return_value_specification
assert_equal 42, @mock.meaning_of_life
end
def test_blow_up_if_not_called
def test_should_blow_up_if_not_called
@mock.foo
util_verify_bad
end
def test_not_blow_up_if_everything_called
def test_should_not_blow_up_if_everything_called
@mock.foo
@mock.meaning_of_life
assert @mock.verify
end
def test_allow_expectations_to_be_added_after_creation
def test_should_allow_expectations_to_be_added_after_creation
@mock.expect(:bar, true)
assert @mock.bar
end
def test_not_verify_if_new_expected_method_is_not_called
def test_should_not_verify_if_new_expected_method_is_not_called
@mock.foo
@mock.meaning_of_life
@mock.expect(:bar, true)
@ -49,13 +49,13 @@ class TestMiniTestMock < MiniTest::Unit::TestCase
util_verify_bad
end
def test_not_verify_if_unexpected_method_is_called
def test_should_not_verify_if_unexpected_method_is_called
assert_raises NoMethodError do
@mock.unexpected
end
end
def test_blow_up_on_wrong_number_of_arguments
def test_should_blow_up_on_wrong_number_of_arguments
@mock.foo
@mock.meaning_of_life
@mock.expect(:sum, 3, [1, 2])
@ -65,7 +65,7 @@ class TestMiniTestMock < MiniTest::Unit::TestCase
end
end
def test_blow_up_on_wrong_arguments
def test_should_blow_up_on_wrong_arguments
@mock.foo
@mock.meaning_of_life
@mock.expect(:sum, 3, [1, 2])
@ -75,35 +75,6 @@ class TestMiniTestMock < MiniTest::Unit::TestCase
util_verify_bad
end
def test_respond_appropriately
assert @mock.respond_to?(:foo)
assert !@mock.respond_to?(:bar)
end
def test_no_method_error_on_unexpected_methods
assert_raises NoMethodError do
@mock.bar
end
end
def test_assign_per_mock_return_values
a = MiniTest::Mock.new
b = MiniTest::Mock.new
a.expect(:foo, :a)
b.expect(:foo, :b)
assert_equal :a, a.foo
assert_equal :b, b.foo
end
def test_do_not_create_stub_method_on_new_mocks
a = MiniTest::Mock.new
a.expect(:foo, :a)
assert !MiniTest::Mock.new.respond_to?(:foo)
end
def util_verify_bad
assert_raises MockExpectationError do
@mock.verify

View file

@ -203,7 +203,7 @@ end
class TestMeta < MiniTest::Unit::TestCase
def test_structure
x = y = z = nil
x = y = nil
x = describe "top-level thingy" do
before {}
after {}
@ -213,23 +213,13 @@ class TestMeta < MiniTest::Unit::TestCase
y = describe "inner thingy" do
before {}
it "inner-it" do end
z = describe "very inner thingy" do
before {}
it "inner-it" do end
end
end
end
assert_equal "top-level thingy", x.to_s
assert_equal "top-level thingy::inner thingy", y.to_s
assert_equal "top-level thingy::inner thingy::very inner thingy", z.to_s
top_methods = %w(setup teardown test_0001_top_level_it)
inner_methods = %w(setup test_0001_inner_it)
assert_equal top_methods, x.instance_methods(false).sort.map {|o| o.to_s }
assert_equal inner_methods, y.instance_methods(false).sort.map {|o| o.to_s }
assert_equal inner_methods, z.instance_methods(false).sort.map {|o| o.to_s }
end
end

View file

@ -10,31 +10,35 @@ require 'minitest/unit'
MiniTest::Unit.autorun
module MyModule; end
class AnError < StandardError; include MyModule; end
module M; end
class E < StandardError; include M; end
class TestMiniTestUnit < MiniTest::Unit::TestCase
class TestMiniTest < MiniTest::Unit::TestCase
pwd = Pathname.new(File.expand_path(Dir.pwd))
basedir = Pathname.new(File.expand_path(MiniTest::MINI_DIR)) + 'mini'
basedir = basedir.relative_path_from(pwd).to_s
MINITEST_BASE_DIR = basedir[/\A\./] ? basedir : "./#{basedir}"
BT_MIDDLE = ["#{MINITEST_BASE_DIR}/test.rb:161:in `each'",
BT_MIDDLE = ["#{MINITEST_BASE_DIR}/test.rb:165:in `run_test_suites'",
"#{MINITEST_BASE_DIR}/test.rb:161:in `each'",
"#{MINITEST_BASE_DIR}/test.rb:161:in `run_test_suites'",
"#{MINITEST_BASE_DIR}/test.rb:158:in `each'",
"#{MINITEST_BASE_DIR}/test.rb:158:in `run_test_suites'",
"#{MINITEST_BASE_DIR}/test.rb:139:in `run'",
"#{MINITEST_BASE_DIR}/test.rb:106:in `run'"]
def assert_report expected = nil
expected ||= "Run options: --seed 42
# Running tests:
expected ||= "Test run options: --seed 42
Loaded suite blah
Started
.
Finished tests in 0.00
Finished in 0.00
1 tests, 1 assertions, 0 failures, 0 errors, 0 skips
Test run options: --seed 42
"
output = @output.string.sub(/Finished tests in .*/, "Finished tests in 0.00")
output = @output.string.sub(/Finished in .*/, "Finished in 0.00")
output.sub!(/Loaded suite .*/, 'Loaded suite blah')
output.sub!(/^(\s+)(?:#{Regexp.union(__FILE__, File.expand_path(__FILE__))}):\d+:/o, '\1FILE:LINE:')
output.sub!(/\[(?:#{Regexp.union(__FILE__, File.expand_path(__FILE__))}):\d+\]/o, '[FILE:LINE]')
@ -47,6 +51,7 @@ Finished tests in 0.00
@tu = MiniTest::Unit.new
@output = StringIO.new("")
MiniTest::Unit.output = @output
assert_equal [0, 0], @tu.run_test_suites
end
def teardown
@ -146,6 +151,18 @@ Finished tests in 0.00
assert_match(/^Exception.*Oh no again!/m, @tu.report.first)
end
def test_class_run_test_suites
tc = Class.new(MiniTest::Unit::TestCase) do
def test_something
assert true
end
end
Object.const_set(:ATestCase, tc)
assert_equal [1, 1], @tu.run_test_suites
end
def test_filter_backtrace
# this is a semi-lame mix of relative paths.
# I cheated by making the autotest parts not have ./
@ -201,15 +218,14 @@ Finished tests in 0.00
Object.const_set(:ATestCase, tc)
@tu.run %w[--seed 42]
@tu.run %w[-s 42]
expected = "Run options: --seed 42
# Running tests:
expected = "Test run options: --seed 42
Loaded suite blah
Started
E.
Finished tests in 0.00
Finished in 0.00
1) Error:
test_error(ATestCase):
@ -217,6 +233,8 @@ RuntimeError: unhandled exception
FILE:LINE:in `test_error'
2 tests, 1 assertions, 0 failures, 1 errors, 0 skips
Test run options: --seed 42
"
assert_report expected
end
@ -234,15 +252,14 @@ RuntimeError: unhandled exception
Object.const_set(:ATestCase, tc)
@tu.run %w[--seed 42]
@tu.run %w[-s 42]
expected = "Run options: --seed 42
# Running tests:
expected = "Test run options: --seed 42
Loaded suite blah
Started
E
Finished tests in 0.00
Finished in 0.00
1) Error:
test_something(ATestCase):
@ -250,6 +267,8 @@ RuntimeError: unhandled exception
FILE:LINE:in `teardown'
1 tests, 1 assertions, 0 failures, 1 errors, 0 skips
Test run options: --seed 42
"
assert_report expected
end
@ -267,21 +286,22 @@ RuntimeError: unhandled exception
Object.const_set(:ATestCase, tc)
@tu.run %w[--seed 42]
@tu.run %w[-s 42]
expected = "Run options: --seed 42
# Running tests:
expected = "Test run options: --seed 42
Loaded suite blah
Started
F.
Finished tests in 0.00
Finished in 0.00
1) Failure:
test_failure(ATestCase) [FILE:LINE]:
Failed assertion, no message given.
2 tests, 2 assertions, 1 failures, 0 errors, 0 skips
Test run options: --seed 42
"
assert_report expected
end
@ -299,17 +319,18 @@ Failed assertion, no message given.
Object.const_set(:ATestCase, tc)
@tu.run %w[--name /some|thing/ --seed 42]
@tu.run %w[-n /something/ -s 42]
expected = "Run options: --name \"/some|thing/\" --seed 42
# Running tests:
expected = "Test run options: --seed 42 --name \"/something/\"
Loaded suite blah
Started
.
Finished tests in 0.00
Finished in 0.00
1 tests, 1 assertions, 0 failures, 0 errors, 0 skips
Test run options: --seed 42 --name \"/something/\"
"
assert_report expected
end
@ -323,7 +344,7 @@ Finished tests in 0.00
Object.const_set(:ATestCase, tc)
@tu.run %w[--seed 42]
@tu.run %w[-s 42]
assert_report
end
@ -341,21 +362,22 @@ Finished tests in 0.00
Object.const_set(:ATestCase, tc)
@tu.run %w[--seed 42]
@tu.run %w[-s 42]
expected = "Run options: --seed 42
# Running tests:
expected = "Test run options: --seed 42
Loaded suite blah
Started
S.
Finished tests in 0.00
Finished in 0.00
1) Skipped:
test_skip(ATestCase) [FILE:LINE]:
not yet
2 tests, 1 assertions, 0 failures, 0 errors, 1 skips
Test run options: --seed 42
"
assert_report expected
end
@ -369,7 +391,7 @@ not yet
end
end
class TestMiniTestUnitTestCase < MiniTest::Unit::TestCase
class TestMiniTestTestCase < MiniTest::Unit::TestCase
def setup
MiniTest::Unit::TestCase.reset
@ -526,9 +548,9 @@ class TestMiniTestUnitTestCase < MiniTest::Unit::TestCase
pattern = Object.new
def pattern.=~(other) false end
def pattern.inspect; "[Object]" end
def pattern.inspect; "<<Object>>" end
util_assert_triggered 'Expected [Object] to match 5.' do
util_assert_triggered 'Expected <<Object>> to match 5.' do
@tc.assert_match pattern, 5
end
end
@ -641,8 +663,8 @@ class TestMiniTestUnitTestCase < MiniTest::Unit::TestCase
end
def test_assert_raises_module
@tc.assert_raises MyModule do
raise AnError
@tc.assert_raises M do
raise E
end
end
@ -714,13 +736,13 @@ FILE:LINE:in `test_assert_raises_triggered_different_msg'
def test_assert_raises_triggered_subclass
e = assert_raises MiniTest::Assertion do
@tc.assert_raises StandardError do
raise AnError
raise E
end
end
expected = "[StandardError] exception expected, not
Class: <AnError>
Message: <\"AnError\">
Class: <E>
Message: <\"E\">
---Backtrace---
FILE:LINE:in `test_assert_raises_triggered_subclass'
---------------"
@ -1001,9 +1023,9 @@ FILE:LINE:in `test_assert_raises_triggered_subclass'
pattern = Object.new
def pattern.=~(other) true end
def pattern.inspect; "[Object]" end
def pattern.inspect; "<<Object>>" end
util_assert_triggered 'Expected [Object] to not match 5.' do
util_assert_triggered 'Expected <<Object>> to not match 5.' do
@tc.refute_match pattern, 5
end
end

View file

@ -1,104 +0,0 @@
############################################################
# This file is imported from a different project.
# DO NOT make modifications in this repo.
# File a patch instead and assign it to Ryan Davis
############################################################
require 'minitest/autorun'
require 'minitest/benchmark'
##
# Used to verify data:
# http://www.wolframalpha.com/examples/RegressionAnalysis.html
##
# Exercises the benchmark range generators and the least-squares
# curve-fitting helpers.  Expected coefficients below were verified
# against external tools (Numbers, R, Wolfram) — do not "fix" them.
class TestMiniTestBenchmark < MiniTest::Unit::TestCase
  def test_cls_bench_exp
    assert_equal [2, 4, 8, 16, 32], self.class.bench_exp(2, 32, 2)
  end

  def test_cls_bench_linear
    assert_equal [2, 4, 6, 8, 10], self.class.bench_linear(2, 10, 2)
  end

  def test_cls_benchmark_methods
    assert_equal [], self.class.benchmark_methods

    c = Class.new(MiniTest::Unit::TestCase) do
      def bench_blah
      end
    end

    assert_equal ["bench_blah"], c.benchmark_methods
  end

  def test_cls_bench_range
    # default range: powers of 10 from 1 to 10k
    assert_equal [1, 10, 100, 1_000, 10_000], self.class.bench_range
  end

  def test_fit_exponential_clean
    x = [1.0, 2.0, 3.0, 4.0, 5.0]
    y = x.map { |n| 1.1 * Math.exp(2.1 * n) }
    assert_fit :exponential, x, y, 1.0, 1.1, 2.1
  end

  def test_fit_exponential_noisy
    x = [1.0, 1.9, 2.6, 3.4, 5.0]
    y = [12, 10, 8.2, 6.9, 5.9]
    # verified with Numbers and R
    assert_fit :exponential, x, y, 0.95, 13.81148, -0.1820
  end

  def test_fit_linear_clean
    # y = m * x + b where m = 2.2, b = 3.1
    x = (1..5).to_a
    y = x.map { |n| 2.2 * n + 3.1 }
    assert_fit :linear, x, y, 1.0, 3.1, 2.2
  end

  def test_fit_linear_noisy
    x = [ 60, 61, 62, 63, 65]
    y = [3.1, 3.6, 3.8, 4.0, 4.1]
    # verified in numbers and R
    assert_fit :linear, x, y, 0.8315, -7.9635, 0.1878
  end

  def test_fit_power_clean
    # y = A x ** B, where B = b and A = e ** a
    # if, A = 1, B = 2, then
    x = [1.0, 2.0, 3.0, 4.0, 5.0]
    y = [1.0, 4.0, 9.0, 16.0, 25.0]
    assert_fit :power, x, y, 1.0, 1.0, 2.0
  end

  def test_fit_power_noisy
    # from www.engr.uidaho.edu/thompson/courses/ME330/lecture/least_squares.html
    x = [10, 12, 15, 17, 20, 22, 25, 27, 30, 32, 35]
    y = [95, 105, 125, 141, 173, 200, 253, 298, 385, 459, 602]
    # verified in numbers
    assert_fit :power, x, y, 0.90, 2.6217, 1.4556

    # income to % of households below income amount
    # http://library.wolfram.com/infocenter/Conferences/6461/PowerLaws.nb
    x = [15000, 25000, 35000, 50000, 75000, 100000]
    y = [0.154, 0.283, 0.402, 0.55, 0.733, 0.843]
    # verified in numbers
    assert_fit :power, x, y, 0.96, 3.119e-5, 0.8959
  end

  # Runs fit_<msg> over x/y, checking r^2 >= +fit+ and that both
  # coefficients land within the default assert_in_delta tolerance.
  def assert_fit msg, x, y, fit, exp_a, exp_b
    a, b, rr = send "fit_#{msg}", x, y

    assert_operator rr, :>=, fit
    assert_in_delta exp_a, a
    assert_in_delta exp_b, b
  end
end