-
Notifications
You must be signed in to change notification settings - Fork 87
230 lines (194 loc) · 8.65 KB
/
iai-callgrind.yml
File metadata and controls
230 lines (194 loc) · 8.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
---
# Runs iai-callgrind (valgrind-based, instruction-count) benchmarks on both the
# base branch and the PR branch, compares the results, and posts/updates a
# markdown comment on the pull request plus a job summary.
name: iai-callgrind Benchmarks

on:
  pull_request:
  merge_group:

jobs:
  benchmarks:
    name: Run iai-callgrind benchmarks
    runs-on: ubuntu-latest
    permissions:
      # Needed to create/update the benchmark-results comment on the PR.
      pull-requests: write
    steps:
      - uses: actions/checkout@v6
        name: Checkout PR branch
        with:
          # Full history so the base branch can be checked out for the baseline run.
          fetch-depth: 0

      - name: Install Rust toolchain
        run: |
          rustup install --profile minimal stable
          rustup default stable

      - name: Install valgrind
        run: sudo apt-get update && sudo apt-get install -y valgrind

      - name: Install iai-callgrind-runner
        uses: baptiste0928/cargo-install@v3
        with:
          crate: iai-callgrind-runner

      - uses: Swatinem/rust-cache@v2
        with:
          key: iai-callgrind

      # Resolve the branch to compare against: the PR's base branch, or "main"
      # when triggered from a merge group.
      - name: Get base branch name
        id: base_branch
        run: |
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            echo "name=${{ github.base_ref }}" >> "$GITHUB_OUTPUT"
          else
            echo "name=main" >> "$GITHUB_OUTPUT"
          fi

      - name: Checkout base branch
        run: |
          git fetch origin ${{ steps.base_branch.outputs.name }}
          git checkout origin/${{ steps.base_branch.outputs.name }}

      # Best-effort: the base branch may not have these benchmarks yet, in which
      # case the comparison step reports "no baseline" instead of failing the job.
      - name: Run benchmarks on base branch
        continue-on-error: true
        run: |
          echo "Running benchmarks on base branch: ${{ steps.base_branch.outputs.name }}"
          cargo bench --features iai --bench iai_algos --bench iai_edmondskarp --bench iai_kuhn_munkres --bench iai_separate_components 2>&1 | tee baseline-output.txt

      - name: Checkout PR branch
        run: git checkout ${{ github.sha }}

      # Avoid stale artifacts from the baseline build leaking into the PR build.
      - name: Clear target directory for PR build
        run: cargo clean

      - name: Run benchmarks on PR branch
        run: |
          echo "Running benchmarks on PR branch"
          cargo bench --features iai --bench iai_algos --bench iai_edmondskarp --bench iai_kuhn_munkres --bench iai_separate_components 2>&1 | tee pr-output.txt

      # Parse both benchmark logs, classify each benchmark as
      # regression / improvement / unchanged / new, and write a markdown
      # comparison to comment.txt for the comment + summary steps.
      - name: Parse and compare results
        if: github.event_name == 'pull_request'
        id: parse_results
        run: |
          python3 << 'EOF'
          import re

          def parse_benchmark_output(filename):
              """Parse iai-callgrind output and extract benchmark results.

              Returns a dict mapping "module::benchmark" to its instruction
              count; empty if the file is missing (e.g. no baseline run).
              """
              benchmarks = {}
              try:
                  with open(filename, 'r') as f:
                      content = f.read()
                  # Pattern to match benchmark names and their metrics
                  benchmark_pattern = r'([^\n]+?)::[^\n]+?::([^\n]+?)\n\s+Instructions:\s+(\d+)'
                  for match in re.finditer(benchmark_pattern, content):
                      bench_name = f"{match.group(1)}::{match.group(2)}"
                      instructions = int(match.group(3))
                      benchmarks[bench_name] = instructions
              except FileNotFoundError:
                  pass
              return benchmarks

          baseline = parse_benchmark_output('baseline-output.txt')
          pr_results = parse_benchmark_output('pr-output.txt')

          # Create markdown comment
          comment = "## 📊 iai-callgrind Benchmark Results\n\n"

          if not baseline:
              comment += "⚠️ **No baseline benchmarks found.** This may be the first time these benchmarks are run on the base branch.\n\n"
              comment += "### PR Branch Results\n\n"
              comment += "| Benchmark | Instructions |\n"
              comment += "|-----------|-------------|\n"
              for name, instr in sorted(pr_results.items()):
                  comment += f"| `{name}` | {instr:,} |\n"
          else:
              # Compare results
              improvements = []
              regressions = []
              unchanged = []
              new_benchmarks = []
              for name, pr_instr in sorted(pr_results.items()):
                  if name in baseline:
                      base_instr = baseline[name]
                      diff = pr_instr - base_instr
                      pct_change = (diff / base_instr) * 100 if base_instr > 0 else 0
                      result = {
                          'name': name,
                          'base': base_instr,
                          'pr': pr_instr,
                          'diff': diff,
                          'pct': pct_change
                      }
                      if abs(pct_change) < 0.1:  # Less than 0.1% change
                          unchanged.append(result)
                      elif diff < 0:
                          improvements.append(result)
                      else:
                          regressions.append(result)
                  else:
                      new_benchmarks.append({'name': name, 'pr': pr_instr})

              # Summary
              if regressions:
                  comment += f"### ⚠️ {len(regressions)} Regression(s) Detected\n\n"
                  comment += "| Benchmark | Base | PR | Change | % |\n"
                  # Separator must have one cell per header column (5).
                  comment += "|-----------|------|----|--------|---|\n"
                  for r in sorted(regressions, key=lambda x: abs(x['pct']), reverse=True):
                      comment += f"| `{r['name']}` | {r['base']:,} | {r['pr']:,} | +{r['diff']:,} | +{r['pct']:.2f}% |\n"
                  comment += "\n"
              if improvements:
                  comment += f"### ✅ {len(improvements)} Improvement(s)\n\n"
                  comment += "| Benchmark | Base | PR | Change | % |\n"
                  comment += "|-----------|------|----|--------|---|\n"
                  for r in sorted(improvements, key=lambda x: abs(x['pct']), reverse=True):
                      comment += f"| `{r['name']}` | {r['base']:,} | {r['pr']:,} | {r['diff']:,} | {r['pct']:.2f}% |\n"
                  comment += "\n"
              if unchanged:
                  comment += f"### ➡️ {len(unchanged)} Unchanged (within ±0.1%)\n\n"
                  comment += "<details><summary>Click to expand</summary>\n\n"
                  comment += "| Benchmark | Instructions |\n"
                  comment += "|-----------|-------------|\n"
                  for r in unchanged:
                      comment += f"| `{r['name']}` | {r['pr']:,} |\n"
                  comment += "\n</details>\n\n"
              if new_benchmarks:
                  comment += f"### 🆕 {len(new_benchmarks)} New Benchmark(s)\n\n"
                  comment += "| Benchmark | Instructions |\n"
                  comment += "|-----------|-------------|\n"
                  for nb in new_benchmarks:
                      comment += f"| `{nb['name']}` | {nb['pr']:,} |\n"
                  comment += "\n"
              if not regressions and not improvements and not new_benchmarks:
                  comment += "### ✅ All benchmarks unchanged\n\n"

          comment += "\n---\n"
          comment += "*iai-callgrind measures instructions executed, which is deterministic and not affected by system load.*\n"

          # Write to file
          with open('comment.txt', 'w') as f:
              f.write(comment)
          print("Comment generated successfully")
          EOF

      # Upsert the results comment: update the existing bot comment if present,
      # otherwise create a new one.
      - name: Post comment to PR
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
            const comment = fs.readFileSync('comment.txt', 'utf8');

            // Find existing comment ("c" to avoid shadowing `comment` above).
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const botComment = comments.find(c =>
              c.user.type === 'Bot' &&
              c.body.includes('iai-callgrind Benchmark Results')
            );

            if (botComment) {
              // Update existing comment
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: botComment.id,
                body: comment
              });
            } else {
              // Create new comment
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: comment
              });
            }

      # Always publish the comparison (or a fallback note) to the job summary,
      # so merge-group runs without a PR still surface results.
      - name: Add summary
        if: always()
        run: |
          if [ -f comment.txt ]; then
            cat comment.txt >> "$GITHUB_STEP_SUMMARY"
          else
            echo "## Benchmark Results" >> "$GITHUB_STEP_SUMMARY"
            echo "Benchmark comparison was not generated." >> "$GITHUB_STEP_SUMMARY"
          fi