-
Notifications
You must be signed in to change notification settings - Fork 0
74 lines (62 loc) · 2.11 KB
/
test.yml
File metadata and controls
74 lines (62 loc) · 2.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
---
# CI workflow: runs unit, integration, and (best-effort) evaluation tests
# against a throwaway Postgres 15 service container.
name: "Test & Evaluation"

# Note: generic YAML 1.1 parsers read the bare `on` key as boolean true;
# GitHub's loader handles it, so suppress yamllint `truthy` here if linting.
on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_USER: postgres
        # Gate job start on the database actually accepting connections.
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Quoted: unquoted digit:digit is a YAML 1.1 sexagesimal-int trap.
          - "5432:5432"

    steps:
      # v4/v5 of these actions run on Node 20; the v3/v4 tags used
      # previously are on the deprecated Node 16 runtime.
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          # Cache pip downloads keyed on requirements.txt.
          cache: pip

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      # Create the app database inside the service container; `|| true`
      # keeps this idempotent when the database already exists (re-runs).
      - name: Create database
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost/postgres
        run: |
          python -c "import psycopg2; conn = psycopg2.connect('postgresql://postgres:postgres@localhost/postgres'); conn.autocommit = True; cur = conn.cursor(); cur.execute('CREATE DATABASE semantic_lineage'); cur.close(); conn.close()" || true

      - name: Load sample data
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost/semantic_lineage
        run: |
          echo "Loading sample data into databases..."
          python src/graph/loader.py || echo "Graph loader completed"
          python src/vector/loader.py || echo "Vector loader completed"
          echo "Sample data loaded successfully"

      - name: Run unit tests
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost/semantic_lineage
        run: |
          pytest tests/test_agent_tools.py -v
          pytest tests/test_agent_graph.py -v

      - name: Run integration tests
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost/semantic_lineage
        run: |
          pytest tests/test_week1_5_integration.py -v

      # Evaluation is slow and allowed to fail without failing the job.
      - name: Run evaluation (optional - slow)
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost/semantic_lineage
        run: |
          pytest tests/test_evaluation_pipeline.py::test_evaluation_runs -v
        continue-on-error: true