Automatically run tests when Droid edits files. Create `.factory/hooks/run-tests.sh`:
#!/bin/bash
# Hook: run the tests that cover a file just written/edited by the tool.
# Reads a JSON event on stdin ({tool_name, tool_input.file_path, cwd}),
# exits 0 to allow, exits 2 (with stderr) to block on failing tests.
set -euo pipefail

input=$(cat)
tool_name=$(jq -r '.tool_name // ""' <<<"$input")
file_path=$(jq -r '.tool_input.file_path // ""' <<<"$input")

# Only run tests after file write/edit
if [[ "$tool_name" != "Write" && "$tool_name" != "Edit" ]]; then
  exit 0
fi

# Skip non-code files
if ! grep -qE '\.(ts|tsx|js|jsx|py|go)$' <<<"$file_path"; then
  exit 0
fi

# Skip test files themselves
if grep -qE '\.(test|spec)\.(ts|tsx|js|jsx)$' <<<"$file_path"; then
  exit 0
fi

# A missing .cwd would otherwise become the literal string "null" and
# make `cd` fail; default to "" and bail out quietly instead.
cwd=$(jq -r '.cwd // ""' <<<"$input")
if [[ -z "$cwd" || ! -d "$cwd" ]]; then
  exit 0
fi
cd "$cwd"

echo "🧪 Running tests for changed file..."

# Determine test command based on file type
case "$file_path" in
  *.ts|*.tsx|*.js|*.jsx)
    # Find corresponding test file (foo.ts -> foo.test.ts, else foo.spec.ts)
    test_file=$(sed -E 's/\.(ts|tsx|js|jsx)$/.test.\1/' <<<"$file_path")
    if [[ ! -f "$test_file" ]]; then
      test_file=$(sed -E 's/\.(ts|tsx|js|jsx)$/.spec.\1/' <<<"$file_path")
    fi
    if [[ -f "$test_file" ]]; then
      # Run the specific test file via the project's npm "test" script.
      # grep -s: stay quiet when package.json does not exist.
      if command -v npm >/dev/null 2>&1 && grep -qs '"test"' package.json; then
        npm test -- "$test_file" 2>&1 || {
          echo "❌ Tests failed for $test_file" >&2
          echo "Please fix the failing tests." >&2
          exit 2
        }
        echo "✓ Tests passed for $test_file"
      fi
    else
      echo "⚠️ No test file found for $file_path"
      echo "Consider creating: $test_file"
    fi
    ;;
  *.py)
    # Run pytest for Python files
    if command -v pytest >/dev/null 2>&1; then
      base=${file_path%.py}
      # Conventional locations: tests/test_<name>.py (src layout)
      # or <name>_test.py next to the source file.
      candidates=("tests/test_$(basename "$base").py" "${base}_test.py")
      found=""
      for candidate in "${candidates[@]}"; do
        if [[ -f "$candidate" ]]; then
          found=$candidate
          break
        fi
      done
      if [[ -n "$found" ]]; then
        pytest "$found" -v 2>&1 || {
          echo "❌ Tests failed" >&2
          exit 2
        }
        echo "✓ Tests passed"
      else
        echo "⚠️ No test file found at ${candidates[0]}"
      fi
    fi
    ;;
  *.go)
    # Run go test on the edited file's package
    if command -v go >/dev/null 2>&1; then
      package=$(dirname "$file_path")
      go test "./$package" -v 2>&1 || {
        echo "❌ Tests failed" >&2
        exit 2
      }
      echo "✓ Tests passed"
    fi
    ;;
esac

exit 0
Track test execution time and warn on slow tests. Create `.factory/hooks/monitor-test-perf.py`:
#!/usr/bin/env python3
"""Monitor test execution time and report slow tests.

Reads a hook event (JSON) on stdin; when the edited file is a TS test
file, reruns it via ``npm test`` and reports tests slower than
``SLOW_TEST_THRESHOLD`` seconds. Exits 2 on timeout, otherwise
propagates the test runner's exit code.
"""
import json
import re
import subprocess
import sys
import time

# Slow test threshold in seconds
SLOW_TEST_THRESHOLD = 5.0

# Matches per-test timing lines such as "  ✓ renders header (123ms)".
_TIMING_RE = re.compile(r'(.*?)\s+\((\d+)ms\)')


def find_slow_tests(output, threshold=SLOW_TEST_THRESHOLD):
    """Parse test-runner output; return [(test_name, seconds)] over threshold."""
    slow_tests = []
    for line in output.split('\n'):
        match = _TIMING_RE.search(line)
        if match:
            test_time_s = int(match.group(2)) / 1000.0
            if test_time_s > threshold:
                slow_tests.append((match.group(1).strip(), test_time_s))
    return slow_tests


def run_tests_with_timing(test_file):
    """Run one test file via npm and capture timing information.

    Returns (elapsed_seconds, slow_tests, returncode); elapsed_seconds is
    None when the run exceeded the 60s timeout.
    """
    start_time = time.time()
    try:
        result = subprocess.run(
            ['npm', 'test', '--', test_file, '--verbose'],
            capture_output=True,
            text=True,
            timeout=60,
        )
    except subprocess.TimeoutExpired:
        return None, [], 1
    elapsed = time.time() - start_time
    return elapsed, find_slow_tests(result.stdout), result.returncode


def main():
    try:
        input_data = json.load(sys.stdin)
        file_path = input_data.get('tool_input', {}).get('file_path', '')

        if not file_path or not file_path.endswith(
                ('.test.ts', '.test.tsx', '.spec.ts', '.spec.tsx')):
            sys.exit(0)

        print(f"⏱️ Monitoring test performance for {file_path}...")

        elapsed, slow_tests, returncode = run_tests_with_timing(file_path)

        if elapsed is None:
            print("❌ Tests timed out", file=sys.stderr)
            sys.exit(2)

        print(f"\nTotal test time: {elapsed:.2f}s")
        if slow_tests:
            print(f"\n⚠️ Found {len(slow_tests)} slow test(s):")
            for test_name, test_time in slow_tests:
                print(f" - {test_name}: {test_time:.2f}s")
            print("\nConsider optimizing these tests or mocking expensive operations.")
        else:
            print("✓ All tests running within acceptable time")

        # Don't block on slow tests, just warn
        sys.exit(returncode)
    except Exception as e:
        # Best-effort hook: never block the workflow on monitoring errors.
        print(f"Error monitoring tests: {e}", file=sys.stderr)
        sys.exit(0)


if __name__ == '__main__':
    main()
Problem: Test execution blocks the workflow. Solution: Run only unit tests and skip integration tests:
# Fast unit tests only: restrict the run to test paths matching "unit".
npm test -- --testPathPattern="unit" "$file"

# Or define a dedicated script in package.json (JSON shown as a comment —
# it is configuration, not shell):
# {
#   "scripts": {
#     "test:fast": "jest --testPathIgnorePatterns=integration"
#   }
# }
Problem: Tests fail in hooks but pass when run manually. Solution: Check for environment differences:
# Make the hook run under the same environment as a manual test run.
export NODE_ENV=test
export CI=true

# Invoke the project's test script, not jest directly,
# so jest config and wrapper flags still apply.
npm test
Problem: Coverage includes generated files. Solution: Configure coverage exclusions: