QA Engineer Skills 2026 — Error Handling and Environment Management

Error Handling and Environment Management

Testing error handling is as important as testing success paths. A well-designed API returns helpful error messages without leaking internal details. Rate limiting protects against abuse. And your test suite must be parameterized to run against any environment.


Error Response Validation

Error responses should be structured, informative for the client, and silent about internals.

def test_error_response_structure(api):
    """Error responses should follow a consistent, documented shape."""
    # An empty body is missing every required field, so creation must fail.
    response = api.post("/users", json={})
    assert response.status_code in (400, 422)
    payload = response.json()
    # A top-level human-readable message is required under either key.
    assert ("message" in payload) or ("error" in payload)
    # Field-level details are optional, but when present each entry must
    # name the offending field and explain what is wrong with it.
    for detail in payload.get("errors", []):
        assert "field" in detail
        assert "message" in detail

def test_error_does_not_leak_internals(api):
    """Error responses should never expose stack traces or internal paths.

    A body containing tracebacks, container filesystem paths, or raw SQL
    text leaks implementation details that attackers can exploit.
    """
    r = api.get("/users/nonexistent-id-format")
    body = r.text.lower()  # normalize once; every probe below is lowercase
    assert "traceback" not in body       # Python stack trace
    assert "/usr/src/app" not in body    # container filesystem path
    assert "node_modules" not in body    # Node.js dependency path
    # Raw SQL text in a 500 response indicates an unhandled database error.
    # (body is already lowercased above — no need to lower it again.)
    assert "sql" not in body or r.status_code != 500
    assert "at object." not in body      # JavaScript stack-trace frame

def test_validation_error_identifies_field(api):
    """Validation failures should name at least one offending field."""
    bad_payload = {
        "name": "",
        "email": "not-an-email",
        "role": "invalid_role",
    }
    response = api.post("/users", json=bad_payload)
    assert response.status_code in (400, 422)
    # The response text must mention at least one of the invalid fields.
    lowered = response.text.lower()
    assert any(field_name in lowered for field_name in bad_payload)

Rate Limiting

APIs should enforce rate limits to prevent abuse. Your tests should verify this protection exists.

def test_rate_limiting_returns_429(base_url):
    """Hammering a public endpoint should eventually yield HTTP 429."""
    seen_429 = False
    # Up to 150 rapid requests; stop as soon as the limiter kicks in.
    for _ in range(150):
        response = requests.get(f"{base_url}/public/health")
        if response.status_code == 429:
            seen_429 = True
            break
    assert seen_429, \
        "Rate limiting was not triggered after 150 requests"

def test_rate_limit_includes_retry_after(base_url):
    """A 429 response must carry a positive Retry-After header."""
    # Hammer the endpoint until the limiter fires (give up after 200 tries).
    response = None
    for _ in range(200):
        response = requests.get(f"{base_url}/public/health")
        if response.status_code == 429:
            break
    # Only assert when rate limiting actually triggered during the loop.
    if response.status_code == 429:
        assert "Retry-After" in response.headers, "429 response missing Retry-After header"
        wait_seconds = int(response.headers["Retry-After"])
        assert wait_seconds > 0

def test_rate_limit_resets(base_url):
    """After waiting out Retry-After, the limiter should allow traffic again."""
    import time

    # First, drive the endpoint into the rate-limited state.
    response = None
    for _ in range(200):
        response = requests.get(f"{base_url}/public/health")
        if response.status_code == 429:
            break

    if response.status_code == 429:
        # Wait one second past the advertised window (default 5s if absent),
        # then a fresh request should succeed.
        wait_seconds = int(response.headers.get("Retry-After", 5))
        time.sleep(wait_seconds + 1)
        response = requests.get(f"{base_url}/public/health")
        assert response.status_code == 200, "Rate limit did not reset after Retry-After period"

Malformed Request Testing

def test_invalid_json_body(api):
    """A syntactically invalid JSON body should yield 400, never 500."""
    # Bypass the api helper's json= serialization to send a raw bad body
    # while still claiming a JSON content type.
    headers = {**api.headers, "Content-Type": "application/json"}
    response = requests.post(
        f"{api._base_url}/users",
        data="this is not json",
        headers=headers,
    )
    assert response.status_code == 400

def test_wrong_content_type(api):
    """Form-encoded data sent to a JSON endpoint must fail gracefully."""
    headers = {**api.headers, "Content-Type": "application/x-www-form-urlencoded"}
    response = requests.post(
        f"{api._base_url}/users",
        data="name=test&email=test@test.com",
        headers=headers,
    )
    # 400 Bad Request or 415 Unsupported Media Type are both acceptable.
    assert response.status_code in (400, 415)

def test_extra_fields_ignored_or_rejected(api):
    """Unknown fields must be ignored or rejected — never persisted or fatal."""
    payload = {
        "name": "Test",
        "email": "test@test.com",
        "role": "viewer",
        "unknown_field": "should be ignored",
        "admin_override": True,  # classic mass-assignment probe
    }
    response = api.post("/users", json=payload)
    # Accepting (while dropping extras) or rejecting outright are both fine.
    assert response.status_code in (201, 400)
    if response.status_code == 201:
        # If the user was created, the extra keys must not have been
        # written through to the stored record.
        created = api.get(f"/users/{response.json()['id']}").json()
        assert "unknown_field" not in created
        assert "admin_override" not in created

def test_extremely_large_payload(api):
    """Oversized request bodies must be rejected rather than crash the server."""
    oversized_name = "A" * 1_000_000  # ~1 MB crammed into a single field
    response = api.post("/users", json={
        "name": oversized_name,
        "email": "test@test.com",
    })
    # 400 Bad Request or 413 Payload Too Large are both acceptable outcomes.
    assert response.status_code in (400, 413)

HTTP Method Enforcement

def test_unsupported_method(api):
    """Unsupported HTTP verbs should return 405 (or 404 from some routers)."""
    for verb in ("PATCH", "DELETE"):
        response = api.request(verb, "/health")
        assert response.status_code in (404, 405)

Environment Parameterization

Your test suite must run against any environment with a single configuration change.

# conftest.py
import os
import pytest

@pytest.fixture(scope="session")
def base_url():
    """Base URL of the API under test; override via the API_BASE_URL env var."""
    # Falls back to a local dev server so the suite runs with zero config.
    return os.environ.get("API_BASE_URL", "http://localhost:3000/api/v1")
# Run against different environments
API_BASE_URL=http://localhost:3000/api/v1 pytest tests/api/
API_BASE_URL=https://api.staging.example.com/v1 pytest tests/api/
API_BASE_URL=https://api.example.com/v1 pytest tests/api/ -m "readonly"

Best Practices

| Practice | Why |
| --- | --- |
| Environment variables for secrets | Never commit tokens to source control |
| Default to localhost | Tests run without configuration out of the box |
| Tag destructive tests | Exclude create/modify tests from production runs |
| Use .env files for local development | Keep local config out of the command line |
| Different credentials per environment | Staging admin != production admin |

Marking Tests for Environment Safety

@pytest.mark.readonly
def test_list_users(api):
    """Read-only listing — safe to run against production."""
    response = api.get("/users")
    assert response.status_code == 200

@pytest.mark.destructive
def test_delete_user(api, create_user):
    """Deletes data — excluded from production runs via the marker."""
    victim = create_user()
    response = api.delete(f"/users/{victim['id']}")
    assert response.status_code == 204
# pytest.ini
[pytest]
markers =
    readonly: Tests that only read data (safe for production)
    destructive: Tests that create, modify, or delete data
    smoke: Quick health check tests

Practical Exercise

  1. Write tests that verify error responses are structured and do not leak internals
  2. Write a rate limiting test that triggers 429 and verifies the Retry-After header
  3. Write tests for malformed requests: invalid JSON, wrong content type, oversized payload
  4. Configure environment parameterization with at least 2 environments
  5. Tag your tests as readonly/destructive and run only readonly tests

Key Takeaways

  • Test error paths as thoroughly as success paths
  • Error responses should be structured, informative, and free of internal details
  • Rate limiting must exist and must include Retry-After headers
  • Malformed requests should return 400, never 500
  • Parameterize environments so the same suite runs against dev, staging, and production
  • Tag destructive tests to prevent accidental data modification in production