Minitest

Tags: Ruby, Testing Framework, TDD, BDD, Mocking, Benchmarking

Overview

Minitest is a complete test suite for Ruby, designed as a lightweight, fast framework that supports TDD, BDD, mocking, and benchmarking. Bundled with Ruby's standard library since Ruby 1.9 as the successor to Test::Unit, its simple, intuitive API and rich functionality have made it the de facto testing standard in modern Ruby development.
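
For a first impression, the smallest useful test file is just a class that inherits from Minitest::Test (the file and class names below are only illustrative); requiring minitest/autorun both loads the framework and runs the suite at exit.

require 'minitest/autorun'

class SmokeTest < Minitest::Test
  def test_truth
    assert_equal 4, 2 + 2
  end
end

# Run it directly: ruby smoke_test.rb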

Details

Key Features

  • Complete Test Suite: Integrated support for TDD, BDD, mocking, and benchmarking
  • Lightweight Design: High-speed operation with minimal dependencies
  • Flexible Styles: Support for both Test::Unit style and spec (RSpec-like) style
  • Stubbing Capabilities: Built-in stub and mock functionality
  • Parallel Execution: Thread-based parallel test execution via parallelize_me!
  • Benchmarking: Built-in performance measurement capabilities
  • Customizable: Extensibility through plugins and reporters
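
Test files loaded through minitest/autorun also accept command-line flags when run directly; the invocations below use an assumed file name and show the common flags for reproducing the random test order, verbose output, and name filtering.

ruby calculator_test.rb --seed 42          # reproduce a specific test order
ruby calculator_test.rb --verbose          # print each test name with its runtime
ruby calculator_test.rb --name /addition/  # run only tests whose names match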

Architecture

Minitest consists of the following core components:

  • Test: Basic test class (Test::Unit compatible)
  • Spec: BDD-style testing with spec notation
  • Mock: Mock objects and stub functionality
  • Benchmark: Performance measurement and benchmarking
  • Reporter: Test result display and formatting
  • Parallel: Parallel test execution engine
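
As a rough sketch of how these components map onto the library's own require paths, each piece can be loaded individually (reporters and the parallel executor ship inside the core minitest library itself):

require 'minitest/test'       # Minitest::Test -- TDD-style test cases and assertions
require 'minitest/spec'       # describe/it spec DSL and expectations
require 'minitest/mock'       # Minitest::Mock plus Object#stub
require 'minitest/benchmark'  # Minitest::Benchmark performance assertions
require 'minitest/autorun'    # loads test, spec, and mock and runs the suite at exit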

Pros and Cons

Pros

  • Standard Integration: Ships with Ruby as part of the standard library
  • High Performance: Fast test execution through lightweight design
  • Multiple Styles: Support for both TDD and BDD
  • Built-in Features: Mocks, stubs, and benchmarking included out of the box
  • Parallel Execution: Fast execution of large test suites
  • Rich Assertions: Support for diverse testing scenarios

Cons

  • Simple Design: Fewer advanced BDD features than RSpec
  • Ecosystem: Smaller plugin ecosystem than RSpec
  • Learning Curve: The coexistence of multiple styles can confuse beginners
  • Advanced Mocking: More complex mocking scenarios require external libraries

Code Examples

Basic Test::Unit Style

require 'minitest/autorun'

class CalculatorTest < Minitest::Test
  def setup
    @calculator = Calculator.new
  end
  
  def test_addition
    assert_equal 4, @calculator.add(2, 2)
    assert_equal 0, @calculator.add(-1, 1)
    assert_equal(-3, @calculator.add(-1, -2))
  end
  
  def test_subtraction
    assert_equal 0, @calculator.subtract(2, 2)
    assert_equal 5, @calculator.subtract(8, 3)
    assert_equal(-1, @calculator.subtract(2, 3))
  end
  
  def test_division_by_zero
    assert_raises(ZeroDivisionError) do
      @calculator.divide(5, 0)
    end
  end
  
  def test_with_detailed_message
    result = @calculator.multiply(3, 4)
    assert_equal 12, result, "3 * 4 should equal 12"
  end
end

Spec Style (BDD)

require 'minitest/autorun'

describe Calculator do
  before do
    @calculator = Calculator.new
  end
  
  describe "addition" do
    it "adds positive numbers" do
      _(@calculator.add(2, 3)).must_equal 5
    end
    
    it "adds negative numbers" do
      _(@calculator.add(-2, -3)).must_equal(-5)
    end
    
    it "handles zero" do
      _(@calculator.add(0, 5)).must_equal 5
      _(@calculator.add(5, 0)).must_equal 5
    end
  end
  
  describe "division" do
    it "divides numbers correctly" do
      _(@calculator.divide(10, 2)).must_equal 5
    end
    
    it "raises error for division by zero" do
      _ { @calculator.divide(5, 0) }.must_raise ZeroDivisionError
    end
  end
  
  describe "edge cases" do
    it "handles floating point numbers" do
      result = @calculator.add(0.1, 0.2)
      _(result).must_be_within_delta 0.3, 0.001
    end
  end
end

Rich Assertions

require 'minitest/autorun'

class AssertionTest < Minitest::Test
  def test_equality_assertions
    # Basic equality
    assert_equal "hello", "hello"
    refute_equal "hello", "world"
    
    # Object identity
    obj = Object.new
    assert_same obj, obj
    refute_same Object.new, Object.new
  end
  
  def test_boolean_assertions
    assert true
    refute false
    
    # nil check
    assert_nil nil
    refute_nil "not nil"
  end
  
  def test_collection_assertions
    array = [1, 2, 3, 4, 5]
    
    assert_includes array, 3
    refute_includes array, 6
    
    assert_empty []
    refute_empty array
  end
  
  def test_type_assertions
    assert_instance_of String, "hello"
    assert_kind_of Numeric, 42
    assert_respond_to [], :push
  end
  
  def test_pattern_assertions
    assert_match(/hello/, "hello world")
    refute_match(/goodbye/, "hello world")
  end
  
  def test_exception_assertions
    assert_raises(ArgumentError) do
      raise ArgumentError, "test error"
    end
    
    exception = assert_raises(RuntimeError) do
      raise "custom error"
    end
    assert_equal "custom error", exception.message
  end
  
  def test_numeric_assertions
    assert_in_delta 3.14159, Math::PI, 0.001
    assert_in_epsilon 1000, 1010, 0.1 # Allow 10% error
    
    assert_operator 5, :>, 3
    assert_operator "abc", :<, "def"
  end
end

Stubs and Mocks

require 'minitest/autorun'

class StubMockTest < Minitest::Test
  def test_stubbing_methods
    # Method stubbing
    Time.stub :now, Time.at(0) do
      assert_equal Time.at(0), Time.now
    end
    
    # Stub returning multiple values in sequence
    sequence = [1, 2, 3].each
    Number.stub :random, -> { sequence.next } do
      assert_equal 1, Number.random
      assert_equal 2, Number.random
      assert_equal 3, Number.random
    end
  end
  
  def test_mock_objects
    # Create mock object
    mock_logger = Minitest::Mock.new
    
    # Set expected calls
    mock_logger.expect :info, true, ["Processing started"]
    mock_logger.expect :info, true, ["Processing completed"]
    
    # Test code using the mock
    processor = DataProcessor.new(mock_logger)
    processor.process_data("test data")
    
    # Verify all expected calls were made
    mock_logger.verify
  end
  
  def test_partial_mocking
    user = User.new("John")
    
    # Stub part of existing object's methods
    user.stub :save, true do
      assert user.update_profile("New Name")
    end
  end
  
  def test_stub_with_different_arguments
    calculator = Calculator.new
    
    # A callable stub receives the call's arguments, so the return
    # value can depend on them
    fake_add = ->(a, b) { a == 2 && b == 3 ? 10 : 20 }
    
    calculator.stub :add, fake_add do
      assert_equal 10, calculator.add(2, 3)
      assert_equal 20, calculator.add(4, 5)
    end
  end
end

Data-Driven Testing

require 'minitest/autorun'

class DataDrivenTest < Minitest::Test
  # Test data definition
  TEST_CASES = [
    { input: [2, 3], expected: 5, operation: :add },
    { input: [10, 4], expected: 6, operation: :subtract },
    { input: [3, 4], expected: 12, operation: :multiply },
    { input: [15, 3], expected: 5, operation: :divide }
  ]
  
  def test_calculator_operations
    calculator = Calculator.new
    
    TEST_CASES.each do |test_case|
      result = calculator.send(test_case[:operation], *test_case[:input])
      assert_equal test_case[:expected], result,
        "Failed #{test_case[:operation]} with #{test_case[:input]}"
    end
  end
  
  # Dynamically generate test methods
  TEST_CASES.each_with_index do |test_case, index|
    define_method "test_#{test_case[:operation]}_case_#{index}" do
      calculator = Calculator.new
      result = calculator.send(test_case[:operation], *test_case[:input])
      assert_equal test_case[:expected], result
    end
  end
end

Parallel Test Execution

require 'minitest/autorun'

class ParallelTest < Minitest::Test
  # Enable parallel execution for this test class
  parallelize_me!
  
  def test_independent_operation_1
    # Independent test processing
    sleep 0.1
    assert_equal 4, 2 + 2
  end
  
  def test_independent_operation_2
    # Independent test processing
    sleep 0.1
    assert_equal 6, 2 * 3
  end
  
  def test_independent_operation_3
    # Independent test processing
    sleep 0.1
    assert_equal 1, 3 - 2
  end
end

# Test class that must not run in parallel
class SerialTest < Minitest::Test
  # No parallelize_me! call here, so these tests run serially
  def test_shared_resource_operation
    # Test using shared resources
    GlobalCounter.increment
    assert_equal 1, GlobalCounter.value
  end
end

Benchmark Testing

require 'minitest/autorun'
require 'minitest/benchmark'

class BenchmarkTest < Minitest::Benchmark
  # Specify size range for benchmarking
  def self.bench_range
    [100, 1_000, 10_000, 100_000]
  end
  
  def bench_array_creation
    assert_performance_linear 0.9999 do |n|
      Array.new(n) { |i| i }
    end
  end
  
  def bench_hash_lookup
    # Pre-build hashes of each size so only the lookups are timed;
    # lookup time should stay flat as the hash grows
    hashes = self.class.bench_range.to_h { |n| [n, (1..n).to_h { |i| [i, i * 2] }] }
    
    assert_performance_constant 0.99 do |n|
      data = hashes[n]
      10_000.times { data[rand(n) + 1] }
    end
  end
  
  def bench_string_concatenation
    # In-place append with << keeps total work roughly linear
    assert_performance_linear 0.9 do |n|
      str = ""
      n.times { |i| str << i.to_s }
    end
  end
  
  def bench_custom_algorithm
    # Pre-build the sorted arrays so only the search itself is timed
    arrays = self.class.bench_range.to_h { |n| [n, generate_sorted_array(n)] }
    
    assert_performance_logarithmic 0.9 do |n|
      binary_search(arrays[n], n / 2)
    end
  end
  
  private
  
  def input_size
    100_000
  end
  
  def binary_search(array, target)
    # Binary search implementation
    low, high = 0, array.length - 1
    
    while low <= high
      mid = (low + high) / 2
      case array[mid] <=> target
      when -1 then low = mid + 1
      when 1 then high = mid - 1
      else return mid
      end
    end
    
    -1
  end
  
  def generate_sorted_array(size)
    (1..size).to_a
  end
end

Custom Assertions and Helpers

require 'minitest/autorun'
require 'ostruct'

module CustomAssertions
  def assert_valid_email(email, message = nil)
    message ||= "Expected #{email} to be a valid email"
    email_regex = /\A[\w+\-.]+@[a-z\d\-]+(\.[a-z\d\-]+)*\.[a-z]+\z/i
    assert email_regex.match?(email), message
  end
  
  def assert_json_response(response, expected_status = 200)
    assert_equal expected_status, response.status
    assert_match(%r{application/json}, response.content_type)
    JSON.parse(response.body)
  rescue JSON::ParserError
    flunk "Response body is not valid JSON: #{response.body}"
  end
  
  def assert_difference(expression, difference = 1, message = nil, &block)
    # Evaluate the expression in the caller's scope via the block's binding
    before = eval(expression, block.binding)
    yield
    after = eval(expression, block.binding)
    
    actual_difference = after - before
    assert_equal difference, actual_difference, message
  end
end

class CustomTest < Minitest::Test
  include CustomAssertions
  
  def test_email_validation
    assert_valid_email "[email protected]"
    
    assert_raises(Minitest::Assertion) do
      assert_valid_email "invalid-email"
    end
  end
  
  def test_json_api_response
    response = simulate_api_call
    data = assert_json_response(response, 200)
    
    assert_includes data, 'user_id'
    assert_equal 'John', data['name']
  end
  
  def test_counter_increment
    counter = Counter.new
    
    assert_difference('counter.value', 2) do
      counter.increment
      counter.increment
    end
  end
  
  private
  
  def simulate_api_call
    OpenStruct.new(
      status: 200,
      content_type: 'application/json',
      body: '{"user_id": 123, "name": "John"}'
    )
  end
end

Advanced Test Configuration

require 'minitest/autorun'
require 'timeout'

# Create custom reporter
class CustomReporter < Minitest::AbstractReporter
  def initialize(io = $stdout, options = {})
    super()  # AbstractReporter#initialize takes no arguments
    @io = io
  end
  
  def start
    @io.puts "🚀 Starting test suite..."
  end
  
  def record(result)
    case result.result_code
    when '.'
      @io.print "✅"
    when 'F'
      @io.print "❌"
    when 'E'
      @io.print "💥"
    when 'S'
      @io.print "⏭️"
    end
  end
  
  def report
    @io.puts "\n🏁 Test suite completed!"
    super
  end
end

# Custom reporters are wired in through Minitest's plugin system: put the
# reporter in a file named minitest/<name>_plugin.rb on the load path and
# append it to the composite reporter in the plugin init hook, e.g.:
#
#   def Minitest.plugin_custom_init(options)
#     Minitest.reporter << CustomReporter.new(options[:io], options)
#   end

class AdvancedConfigTest < Minitest::Test
  # Conditional testing
  def test_environment_specific
    skip "This test only runs in production" unless ENV['RAILS_ENV'] == 'production'
    
    # Production environment specific test
    assert production_feature_enabled?
  end
  
  def test_with_timeout
    # Test with a time limit (Timeout is part of Ruby's standard library)
    Timeout.timeout(5) do
      slow_operation
    end
  rescue Timeout::Error
    flunk "Operation took too long"
  end
  
  def test_flaky_test_with_retry
    retries = 0
    
    begin
      # Flaky test processing
      flaky_operation
    rescue StandardError => e
      retries += 1
      retry if retries < 3
      raise e
    end
  end
  
  private
  
  def production_feature_enabled?
    # Production environment feature flag check
    true
  end
  
  def slow_operation
    sleep 1
    true
  end
  
  def flaky_operation
    # Test that fails 30% of the time
    raise "Flaky failure" if rand < 0.3
    true
  end
end