pytest

Python · Testing · Unit Testing · Test Framework · Fixtures · Parametrization · Plugins

Testing Framework

pytest

Overview

pytest is the most popular testing framework for Python, offering simple syntax for beginners while providing powerful fixture systems, parametrization, and a rich plugin ecosystem that scales to projects of any size. With its advanced assertion introspection and detailed error reporting, pytest is widely adopted from individual developers to enterprise-level applications.

Details

Features

  • Simple Syntax: Function-based test writing with low learning curve
  • Powerful Fixture System: Automated test data setup and cleanup
  • Parametrized Testing: Efficient testing of same logic with multiple input values
  • Rich Plugin Ecosystem: Over 800 plugins for functionality extension
  • Detailed Assertions: Advanced assert statement analysis with specific error information
  • Flexible Execution Control: Test classification and selective execution using markers
  • Parallel Execution Support: Distributed test execution with pytest-xdist

Technical Background

pytest development began in 2004, designed to solve limitations of Python's standard unittest module. Its function-based approach avoids the complexity of class inheritance, enabling more intuitive test writing. The fixture system adopts dependency injection patterns, significantly improving test reusability and maintainability.

Ecosystem

pytest offers rich integration options including pytest-cov for coverage measurement, pytest-django for Django integration, pytest-asyncio for asynchronous testing, and pytest-bdd for BDD (Behavior-Driven Development) support, among many framework and library integrations.

Pros and Cons

Pros

  • Easy Learning: Immediate test creation with assert statements and simple functions
  • High Productivity: Efficient test creation through fixtures and parametrization
  • Excellent Diagnostics: Detailed failure information for easy debugging
  • Extensibility: Flexible customization through plugin system
  • Enterprise Ready: Proven track record and stability in large-scale projects
  • Active Community: Continuous feature additions and support

Cons

  • Feature Complexity: Time required to master advanced features
  • Plugin Dependencies: Certain functionalities require additional plugins
  • Execution Speed: Slightly heavier than unittest for small-scale tests
  • Configuration Complexity: Configuration management can become complex in large projects

Reference Pages

Code Examples

Hello World (Basic Testing)

# test_basic.py
def test_addition():
    """Verify that integer addition yields the expected sum."""
    expected = 5
    assert 2 + 3 == expected

def test_string_operations():
    """Exercise common string predicates on a sample message."""
    message = "Hello, pytest!"
    assert "pytest" in message
    assert message.startswith("Hello")
    assert message.endswith("!")

# Run: pytest test_basic.py

Fixture Usage Examples

# test_fixtures.py
import pytest
import tempfile
import os

@pytest.fixture
def sample_data():
    """Provide a small users/scores dataset for the example tests."""
    names = ["Alice", "Bob", "Charlie"]
    points = [85, 92, 78]
    return {"users": names, "scores": points}

@pytest.fixture
def temp_file():
    """Yield the path of a fresh temporary file, removing it afterwards."""
    # Setup: create the file and release the low-level descriptor,
    # since only the path is handed to the test.
    handle, path = tempfile.mkstemp()
    os.close(handle)

    yield path  # hand the path to the test

    # Teardown: delete the file if the test left it behind
    if os.path.exists(path):
        os.unlink(path)

def test_data_processing(sample_data):
    """Test using fixtures"""
    users = sample_data["users"]
    scores = sample_data["scores"]
    
    assert len(users) == len(scores)
    assert max(scores) == 92
    assert "Alice" in users

def test_file_operations(temp_file):
    """File operation tests"""
    # Write to file
    with open(temp_file, 'w') as f:
        f.write("Test content")
    
    # Read from file
    with open(temp_file, 'r') as f:
        content = f.read()
    
    assert content == "Test content"

Parametrized Testing

# test_parametrize.py
import pytest

@pytest.mark.parametrize("input_value,expected", [
    (2, 4),
    (3, 9),
    (4, 16),
    (5, 25),
])
def test_square(input_value, expected):
    """Each parametrized case checks that squaring matches the expectation."""
    result = input_value ** 2
    assert result == expected

@pytest.mark.parametrize("text,should_be_valid", [
    # NOTE(review): the two valid-case literals had been mangled into
    # "[email protected]" (an email-obfuscation artifact), which the regex
    # below rejects — both True cases would fail. Restored to realistic
    # addresses.
    ("user@example.com", True),
    ("first.last@sub.domain.org", True),
    ("invalid-email", False),
    ("@domain.com", False),
    ("user@", False),
])
def test_email_validation(text, should_be_valid):
    """Parametrized email validation testing against a simple regex."""
    import re
    # Simplified RFC-style pattern: local part, '@', domain, dot, TLD of 2+ letters
    pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    is_valid = bool(re.match(pattern, text))
    assert is_valid == should_be_valid

# Multiple parameter combinations
@pytest.mark.parametrize("x", [1, 2])
@pytest.mark.parametrize("y", [10, 20])
def test_multiplication(x, y):
    """Stacked parametrize marks run every (x, y) combination."""
    product = x * y
    assert product > 0
    assert product == x * y

Exception Testing

# test_exceptions.py
import pytest

def divide(a, b):
    """Return a / b.

    Raises:
        ValueError: if b is zero.
    """
    if b != 0:
        return a / b
    raise ValueError("Cannot divide by zero")

class CustomError(Exception):
    """Domain-specific error raised for disallowed values."""

def risky_function(value):
    """Double *value*, rejecting non-positive input.

    Raises:
        CustomError: if value is negative.
        ValueError: if value is zero.
    """
    if value > 0:
        return value * 2
    if value == 0:
        raise ValueError("Zero is not valid")
    raise CustomError("Negative values not allowed")

def test_zero_division():
    """A zero divisor must trigger ValueError with the documented message."""
    expected_message = "Cannot divide by zero"
    with pytest.raises(ValueError, match=expected_message):
        divide(10, 0)

def test_successful_division():
    """A valid division returns the exact float quotient."""
    quotient = divide(10, 2)
    assert quotient == 5.0

def test_custom_exception():
    """Negative input must raise CustomError with the documented message."""
    expected_message = "Negative values not allowed"
    with pytest.raises(CustomError, match=expected_message):
        risky_function(-1)

def test_multiple_exceptions():
    """Each class of invalid input maps to its own exception type."""
    # Zero input -> ValueError
    with pytest.raises(ValueError, match="Zero is not valid"):
        risky_function(0)

    # Negative input -> CustomError
    with pytest.raises(CustomError):
        risky_function(-5)

def test_exception_info():
    """The captured ExceptionInfo exposes both the message and the type."""
    with pytest.raises(ValueError) as caught:
        divide(1, 0)

    assert caught.type == ValueError
    assert "Cannot divide by zero" in str(caught.value)

Marking (Custom Markers)

# test_markers.py
import pytest

# Custom marker definition (in pytest.ini or conftest.py)
# [pytest]
# markers =
#     slow: marks tests as slow
#     integration: marks tests as integration tests
#     unit: marks tests as unit tests
#     smoke: marks tests as smoke tests

@pytest.mark.unit
def test_fast_calculation():
    """Quick arithmetic check tagged as a unit test."""
    total = 2 + 2
    assert total == 4

@pytest.mark.slow
def test_complex_calculation():
    """Deliberately slow test demonstrating the 'slow' marker."""
    import time
    time.sleep(0.1)  # stand-in for expensive work
    total = sum(range(1000))
    assert total == 499500

@pytest.mark.integration
def test_database_connection():
    """Integration test placeholder; the connection is simulated."""
    is_connected = True  # stands in for a real database handshake
    assert is_connected

@pytest.mark.smoke
def test_basic_functionality():
    """Trivial smoke test proving the suite can run at all."""
    assert True

@pytest.mark.parametrize("env", ["development", "staging", "production"])
@pytest.mark.integration
def test_environment_specific(env):
    """Runs once per deployment environment name."""
    known_environments = {"development", "staging", "production"}
    assert env in known_environments

# Execution examples:
# pytest -m "unit"                    # Run only unit tests
# pytest -m "not slow"               # Run all except slow tests
# pytest -m "integration or smoke"   # Run integration and smoke tests

Plugin Utilization

# conftest.py - Project-wide configuration
import pytest
import logging

# Custom fixtures
@pytest.fixture(scope="session")
def api_client():
    """Session-scoped fixture yielding a mock HTTP API client."""
    class MockAPIClient:
        """In-memory stand-in for a real REST client."""

        def __init__(self):
            self.base_url = "https://api.example.com"
            self.connected = True

        def get(self, endpoint):
            # Canned success payload echoing the endpoint
            return {"status": "success", "data": f"Data from {endpoint}"}

        def post(self, endpoint, data):
            # Canned creation response with a fixed id
            return {"status": "created", "id": 123}

        def close(self):
            self.connected = False

    client = MockAPIClient()
    yield client
    client.close()  # teardown: mark the session client closed

@pytest.fixture
def logger():
    """Return a module-named logger configured at INFO level."""
    logging.basicConfig(level=logging.INFO)  # no-op if logging is already configured
    return logging.getLogger(__name__)

# Custom marker configuration
def pytest_configure(config):
    """Register the project's custom markers with pytest.

    Args:
        config: the pytest Config object supplied by the hook machinery.
    """
    for marker_line in (
        "api: mark test as API test",
        "database: mark test as database test",
    ):
        config.addinivalue_line("markers", marker_line)

# Test file: test_plugins.py
import pytest

@pytest.mark.api
def test_api_get(api_client, logger):
    """GET through the mock client should report success and carry data."""
    logger.info("Testing API GET request")
    payload = api_client.get("/users")

    assert "data" in payload
    assert payload["status"] == "success"

@pytest.mark.api
def test_api_post(api_client):
    """POST test using the API client fixture.

    NOTE(review): the email literal had been mangled into
    "[email protected]" by an obfuscation pass; restored to a realistic
    address (the value itself is not asserted by the mock client).
    """
    data = {"name": "New User", "email": "new.user@example.com"}
    response = api_client.post("/users", data)

    assert response["status"] == "created"
    assert response["id"] == 123

# pytest-cov usage example (coverage measurement)
# pip install pytest-cov
# Run: pytest --cov=mymodule --cov-report=html

# pytest-xdist usage example (parallel execution)
# pip install pytest-xdist
# Run: pytest -n 4  # Parallel execution with 4 processes

# pytest-mock usage example (enhanced mocking)
# pip install pytest-mock
def test_with_mock(mocker):
    """Patch an expensive dependency and verify the wrapper delegates to it."""
    patched = mocker.patch('mymodule.expensive_function')
    patched.return_value = "mocked result"

    # Import after patching so the call site sees the mock
    from mymodule import use_expensive_function
    outcome = use_expensive_function()

    assert outcome == "mocked result"
    patched.assert_called_once()