-
Notifications
You must be signed in to change notification settings - Fork 3
291 lines (251 loc) · 11.5 KB
/
integration-test.yml
File metadata and controls
291 lines (251 loc) · 11.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
---
# Integration test workflow: builds the Voxtral TTS binaries on two
# backends (tch on Linux ARM64, MLX on macOS ARM64), downloads the model,
# and exercises both the CLI and the OpenAI-compatible API server.
name: Integration Test

on:
  workflow_dispatch:
    inputs:
      text:
        description: "Text to synthesize"
        default: "Hello! This is a test of the Voxtral text to speech system running on CI."
        required: false
      voice:
        description: "Voice preset"
        default: "neutral_female"
        required: false

jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-24.04-arm
            name: Linux ARM64 (tch)
            backend: tch
            libtorch-url: https://github.com/second-state/libtorch-releases/releases/download/v2.7.1/libtorch-cxx11-abi-aarch64-2.7.1.tar.gz
          - os: macos-latest
            name: macOS ARM64 (MLX)
            backend: mlx
    runs-on: ${{ matrix.os }}
    name: Integration (${{ matrix.name }})
    steps:
      - uses: actions/checkout@v4

      - name: Init MLX submodule
        if: matrix.backend == 'mlx'
        run: git submodule update --init --recursive

      - name: Install build dependencies (macOS)
        if: runner.os == 'macOS'
        run: brew install automake autoconf libtool

      # Build against freshly-resolved dependency versions rather than
      # the committed lockfile.
      - name: Delete Cargo.lock
        run: rm -f Cargo.lock

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable

      # ---------------------------------------------------------------
      # Build
      # ---------------------------------------------------------------
      - name: Download libtorch
        if: matrix.backend == 'tch'
        run: |
          curl -Lo libtorch.tar.gz "${{ matrix.libtorch-url }}"
          tar xzf libtorch.tar.gz

      - name: Set linker rpath-link (Linux only)
        if: runner.os == 'Linux' && matrix.backend == 'tch'
        run: echo "RUSTFLAGS=-C link-arg=-Wl,-rpath-link,${{ github.workspace }}/libtorch/lib" >> "$GITHUB_ENV"

      - name: Build (tch)
        if: matrix.backend == 'tch'
        env:
          LIBTORCH: ${{ github.workspace }}/libtorch
          LIBTORCH_BYPASS_VERSION_CHECK: "1"
        run: cargo build --release

      - name: Build (MLX)
        if: matrix.backend == 'mlx'
        run: cargo build --release --no-default-features --features mlx

      # ---------------------------------------------------------------
      # Download model
      # ---------------------------------------------------------------
      - name: Download model
        run: bash scripts/download_model.sh

      # The shipped voice embeddings are .pt tensors; convert each to a
      # sibling .safetensors file so the Rust runtime can load them.
      - name: Convert voice embeddings to safetensors
        run: |
          python3 -m venv .venv
          .venv/bin/pip install torch safetensors numpy packaging --quiet
          .venv/bin/python3 -c "
          import torch, os
          from safetensors.torch import save_file
          d = 'models/voxtral-4b-tts/voice_embedding'
          for f in sorted(os.listdir(d)):
              if f.endswith('.pt'):
                  t = torch.load(os.path.join(d, f), map_location='cpu', weights_only=True)
                  save_file({'embedding': t}, os.path.join(d, f.replace('.pt', '.safetensors')))
                  print(f'Converted {f}')
          "

      # ---------------------------------------------------------------
      # CLI tests — different voices and text lengths
      # ---------------------------------------------------------------
      # Note: macOS CI runners (M1) are ~60x slower than local M4 Max
      # for MLX inference, so we use shorter text on macOS to stay
      # within the 6h GitHub Actions timeout.
      - name: "CLI: Short English (neutral_female)"
        env:
          RUST_LOG: info
        run: |
          ./target/release/voxtral-tts models/voxtral-4b-tts \
            --text "Hello." \
            --voice neutral_female \
            --output cli_short.wav
          file cli_short.wav
          ls -lh cli_short.wav

      - name: "CLI: Medium English (neutral_male)"
        if: matrix.backend != 'mlx'
        env:
          RUST_LOG: info
        run: |
          ./target/release/voxtral-tts models/voxtral-4b-tts \
            --text "The quick brown fox jumps over the lazy dog. This is a longer sentence to test audio generation quality." \
            --voice neutral_male \
            --output cli_medium.wav
          file cli_medium.wav
          ls -lh cli_medium.wav

      - name: "CLI: French (fr_female)"
        if: matrix.backend != 'mlx'
        run: |
          ./target/release/voxtral-tts models/voxtral-4b-tts \
            --text "Bonjour! Ceci est un test du système Voxtral." \
            --voice fr_female \
            --output cli_french.wav
          file cli_french.wav
          ls -lh cli_french.wav

      # Workflow-dispatch inputs are user-controlled: pass them via env
      # vars instead of interpolating ${{ }} into the script, which would
      # allow shell injection through the input text.
      - name: "CLI: Custom voice via input"
        if: inputs.voice != 'neutral_female' && matrix.backend != 'mlx'
        env:
          INPUT_TEXT: ${{ inputs.text }}
          INPUT_VOICE: ${{ inputs.voice }}
        run: |
          ./target/release/voxtral-tts models/voxtral-4b-tts \
            --text "$INPUT_TEXT" \
            --voice "$INPUT_VOICE" \
            --output cli_custom.wav
          file cli_custom.wav
          ls -lh cli_custom.wav

      - name: "CLI: List voices"
        run: ./target/release/voxtral-tts models/voxtral-4b-tts --list-voices --text ""

      # ---------------------------------------------------------------
      # API server tests — non-streaming, streaming, MP3
      # ---------------------------------------------------------------
      - name: "Server: Start in background"
        env:
          RUST_LOG: info
        run: |
          ./target/release/voxtral-tts-server models/voxtral-4b-tts --port 8090 &
          SERVER_PID=$!
          echo "SERVER_PID=$SERVER_PID" >> "$GITHUB_ENV"
          # Poll /health for up to 60s; the final curl below fails the
          # step (and reaps the server) if it never came up.
          for i in $(seq 1 60); do
            if curl -sf http://127.0.0.1:8090/health > /dev/null 2>&1; then
              echo "Server ready after ${i}s"
              break
            fi
            sleep 1
          done
          curl -sf http://127.0.0.1:8090/health || (echo "Server failed to start"; kill $SERVER_PID 2>/dev/null; exit 1)

      - name: "Server: GET /health"
        run: curl -sf http://127.0.0.1:8090/health | tee /dev/stderr | grep -q ok

      - name: "Server: GET /v1/models"
        run: curl -sf http://127.0.0.1:8090/v1/models | tee /dev/stderr | grep -q voxtral

      - name: "API non-streaming: short (alloy)"
        run: |
          curl -sf -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"Hello.","voice":"alloy","model":"voxtral-4b-tts"}' \
            -o api_short.wav
          file api_short.wav
          ls -lh api_short.wav

      - name: "API streaming: medium (neutral_female)"
        if: matrix.backend != 'mlx'
        run: |
          curl -sN -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"The quick brown fox jumps over the lazy dog. Pack my box with five dozen liquor jugs.","voice":"neutral_female","stream":true}' \
            --max-time 1800 \
            -o sse_medium.txt || true
          grep -q "speech.audio.delta" sse_medium.txt || (echo "Missing speech.audio.delta"; exit 1)
          grep -q "speech.audio.done" sse_medium.txt || (echo "Missing speech.audio.done"; exit 1)
          echo "Streaming medium test passed"
          # Decode base64 PCM chunks into a playable WAV file
          python3 -c "
          import base64, struct, json
          raw_pcm = b''
          with open('sse_medium.txt') as f:
              for line in f:
                  line = line.strip()
                  if not line.startswith('data:'):
                      continue
                  payload = json.loads(line[len('data:'):].strip())
                  if payload.get('type') == 'speech.audio.delta':
                      raw_pcm += base64.b64decode(payload['delta'])
          assert len(raw_pcm) > 0, 'No PCM data decoded from SSE stream'
          print(f'Decoded {len(raw_pcm)} bytes of PCM from SSE stream')
          sample_rate = 24000
          channels = 1
          bits = 16
          data_size = len(raw_pcm)
          with open('api_streaming_medium.wav', 'wb') as out:
              out.write(b'RIFF')
              out.write(struct.pack('<I', 36 + data_size))
              out.write(b'WAVE')
              out.write(b'fmt ')
              out.write(struct.pack('<IHHIIHH', 16, 1, channels, sample_rate,
                  sample_rate * channels * bits // 8, channels * bits // 8, bits))
              out.write(b'data')
              out.write(struct.pack('<I', data_size))
              out.write(raw_pcm)
          print('Wrote api_streaming_medium.wav')
          "
          file api_streaming_medium.wav
          ls -lh api_streaming_medium.wav

      - name: "API non-streaming: long text multi-chunk (neutral_female)"
        if: matrix.backend != 'mlx'
        run: |
          curl -sf -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"Welcome to Voxtral text to speech. This is a test of the sentence chunking feature that splits long input text into multiple chunks. Each sentence is generated independently with its own prefill and generation cycle. The resulting audio chunks are concatenated together to form the complete output. This approach prevents memory issues with very long text sequences. It also keeps prefill time constant regardless of total text length. The quick brown fox jumps over the lazy dog.","voice":"neutral_female","model":"voxtral-4b-tts"}' \
            -o api_longtext.wav
          file api_longtext.wav
          ls -lh api_longtext.wav
          # stat -f%z is BSD (macOS), stat -c%s is GNU (Linux)
          SIZE=$(stat -f%z api_longtext.wav 2>/dev/null || stat -c%s api_longtext.wav 2>/dev/null)
          echo "API long text WAV size: $SIZE bytes"
          [ "$SIZE" -gt 100000 ] || (echo "WAV too small for long text ($SIZE bytes)"; exit 1)

      - name: "Server: Validation errors"
        run: |
          # Empty input -> 400
          STATUS=$(curl -s -o /dev/null -w '%{http_code}' -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"","voice":"alloy"}')
          echo "Empty input: $STATUS"
          [ "$STATUS" = "400" ] || (echo "Expected 400, got $STATUS"; exit 1)
          # Invalid format -> 400
          STATUS=$(curl -s -o /dev/null -w '%{http_code}' -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"Hello.","voice":"alloy","response_format":"aac"}')
          echo "Invalid format: $STATUS"
          [ "$STATUS" = "400" ] || (echo "Expected 400, got $STATUS"; exit 1)
          # Speed out of range -> 400
          STATUS=$(curl -s -o /dev/null -w '%{http_code}' -X POST http://127.0.0.1:8090/v1/audio/speech \
            -H "Content-Type: application/json" \
            -d '{"input":"Hello.","voice":"alloy","speed":10.0}')
          echo "Invalid speed: $STATUS"
          [ "$STATUS" = "400" ] || (echo "Expected 400, got $STATUS"; exit 1)

      - name: "Server: Stop"
        if: always()
        run: kill ${{ env.SERVER_PID }} 2>/dev/null || true

      # ---------------------------------------------------------------
      # Upload audio artifacts
      # ---------------------------------------------------------------
      - name: Upload generated audio
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: audio-${{ matrix.os }}-${{ matrix.backend }}
          path: |
            *.wav
            sse_medium.txt

      - name: Upload binaries
        uses: actions/upload-artifact@v4
        with:
          name: binaries-${{ matrix.os }}-${{ matrix.backend }}
          path: |
            target/release/voxtral-tts
            target/release/voxtral-tts-server