-
-
Notifications
You must be signed in to change notification settings - Fork 802
166 lines (143 loc) · 6.2 KB
/
benchmark.yml
File metadata and controls
166 lines (143 loc) · 6.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
name: Performance Benchmarks

on:
  pull_request:
    paths:
      - 'bbot/**/*.py'
      - 'pyproject.toml'
      - '.github/workflows/benchmark.yml'

# One benchmark run per PR at a time; a newer push cancels the older run.
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

permissions:
  contents: read
  pull-requests: write  # needed to create/update the benchmark comment

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0  # Need full history for branch comparison

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"

      - name: Install dependencies
        run: |
          pip install poetry
          poetry install --with dev

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libmagic1

      # Generate benchmark comparison report using our branch-based script.
      # Branch names are attacker-influenced on fork PRs, so they are passed
      # through env (and quoted) rather than interpolated into the shell
      # command — see GitHub's script-injection hardening guidance.
      - name: Generate benchmark comparison report
        env:
          BASE_REF: ${{ github.base_ref }}
          HEAD_REF: ${{ github.head_ref }}
        run: |
          poetry run python bbot/scripts/benchmark_report.py \
            --base "$BASE_REF" \
            --current "$HEAD_REF" \
            --output benchmark_report.md \
            --keep-results
        continue-on-error: true

      # Upload benchmark results as artifacts. Runs even if the report step
      # failed, and tolerates partially-missing files (the script is
      # best-effort and may not produce all three).
      - name: Upload benchmark results
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: benchmark-results
          path: |
            benchmark_report.md
            base_benchmark_results.json
            current_benchmark_results.json
          if-no-files-found: warn
          retention-days: 30

      # Comment on PR with benchmark results: update the most recent existing
      # benchmark comment if there is one (deleting older duplicates),
      # otherwise create a new comment. Falls back to a short failure notice
      # if the report file is missing or any API call throws.
      - name: Comment benchmark results on PR
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
            try {
              const report = fs.readFileSync('benchmark_report.md', 'utf8');
              // Fetch ALL comments — github.paginate follows every page; a
              // single listComments call caps at 100 and could miss the
              // existing benchmark comment on long PRs, creating duplicates.
              const comments = await github.paginate(github.rest.issues.listComments, {
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                per_page: 100,
              });
              // Debug: log all comments to see what we're working with
              console.log(`Found ${comments.length} comments on this PR`);
              comments.forEach((comment, index) => {
                console.log(`Comment ${index}: user=${comment.user.login}, body preview="${comment.body.substring(0, 100)}..."`);
              });
              const existingComments = comments.filter(comment =>
                comment.body.toLowerCase().includes('performance benchmark') &&
                comment.user.login === 'github-actions[bot]'
              );
              console.log(`Found ${existingComments.length} existing benchmark comments`);
              if (existingComments.length > 0) {
                // Sort newest-first so index 0 is the comment we keep/update.
                const sortedComments = existingComments.sort((a, b) =>
                  new Date(b.created_at) - new Date(a.created_at)
                );
                const mostRecentComment = sortedComments[0];
                console.log(`Updating most recent benchmark comment: ${mostRecentComment.id} (created: ${mostRecentComment.created_at})`);
                await github.rest.issues.updateComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: mostRecentComment.id,
                  body: report
                });
                console.log('Updated existing benchmark comment');
                // Delete any older duplicate comments (best-effort: a failed
                // delete is logged but does not fail the step).
                if (sortedComments.length > 1) {
                  console.log(`Deleting ${sortedComments.length - 1} older duplicate comments`);
                  for (let i = 1; i < sortedComments.length; i++) {
                    const commentToDelete = sortedComments[i];
                    console.log(`Attempting to delete comment ${commentToDelete.id} (created: ${commentToDelete.created_at})`);
                    try {
                      await github.rest.issues.deleteComment({
                        owner: context.repo.owner,
                        repo: context.repo.repo,
                        comment_id: commentToDelete.id
                      });
                      console.log(`Successfully deleted duplicate comment: ${commentToDelete.id}`);
                    } catch (error) {
                      console.error(`Failed to delete comment ${commentToDelete.id}: ${error.message}`);
                      console.error(`Error details:`, error);
                    }
                  }
                }
              } else {
                // Create new comment
                await github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: context.issue.number,
                  body: report
                });
                console.log('Created new benchmark comment');
              }
            } catch (error) {
              console.error('Failed to post benchmark results:', error);
              // Post a fallback comment. NOTE: this is a YAML literal block,
              // so the join separator must be '\n' — '\\n' would reach JS as
              // a literal backslash-n and render the comment on one line.
              const fallbackMessage = [
                '## Performance Benchmark Report',
                '',
                '> ⚠️ **Failed to generate detailed benchmark comparison**',
                '> ',
                '> The benchmark comparison failed to run. This might be because:',
                '> - Benchmark tests don\'t exist on the base branch yet',
                '> - Dependencies are missing',
                '> - Test execution failed',
                '> ',
                `> Please check the [workflow logs](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}) for details.`,
                '> ',
                '> 📁 Benchmark artifacts may be available for download from the workflow run.'
              ].join('\n');
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: fallbackMessage
              });
            }