Skip to content

Commit bddf887

Browse files
ofirgluzman and claude
committed
Bring upstream/main up to date with origin/main
- Add IC-LoRA support with depth (MiDaS DPT-Hybrid) and pose (DW Pose) conditioning pipelines - Embed IC-LoRA UI in Gen Space, Playground, and Video Editor - Add conditioning cache for IC-LoRA pipeline outputs - Refactor model download APIs to explicit model type selection with spec-based sizing and session-tracked progress - Consolidate ModelFileId and ModelFileType into single ModelFileType - Unify IC-LoRA model download flow into main download system (remove separate ic_lora_model_downloader) - Add secure models directory change support with admin token guard - Harden project assets path IPC against renderer abuse - Ensure backend error responses never return empty error messages - Unify output video sizing to match input video aspect ratio Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent f6657f9 commit bddf887

71 files changed

Lines changed: 3566 additions & 1642 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

AGENTS.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ LTX Desktop is an Electron app for AI video generation using LTX models. Three-l
2424
| `pnpm build:mac` / `pnpm build:win` | Full platform builds |
2525
| `pnpm setup:dev:mac` / `pnpm setup:dev:win` | One-time dev environment setup |
2626

27-
Run a single backend test: `cd backend && uv run pytest tests/test_generation.py -v --tb=short`
27+
Run a single backend test file via pnpm: `pnpm backend:test -- tests/test_ic_lora.py`
2828

2929
## CI Checks
3030

@@ -36,7 +36,7 @@ PRs must pass: `pnpm typecheck` + `pnpm backend:test` + frontend Vite build.
3636
- **State management**: React contexts only (`ProjectContext`, `AppSettingsContext`, `KeyboardShortcutsContext`) — no Redux/Zustand
3737
- **Routing**: View-based via `ProjectContext` with views: `home`, `project`, `playground`
3838
- **IPC bridge**: All Electron communication through `window.electronAPI` (defined in `electron/preload.ts`)
39-
- **Backend calls**: Frontend calls `http://localhost:8000` directly
39+
- **Backend calls**: Always use `backendFetch` from `frontend/lib/backend.ts` for app backend HTTP requests (it attaches auth/session details). Do not call `fetch` directly for backend endpoints.
4040
- **Styling**: Tailwind with custom semantic color tokens via CSS variables; utilities from `class-variance-authority` + `clsx` + `tailwind-merge`
4141
- **No frontend tests** currently exist
4242

NOTICES.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,21 @@ used by LTX Desktop.
2222
License: Apache License 2.0
2323
https://huggingface.co/Tongyi-MAI/Z-Image-Turbo
2424

25+
- **DPT-Hybrid (MiDaS 3.0)**
26+
Copyright (c) Intel Corporation.
27+
License: Apache License 2.0
28+
https://huggingface.co/Intel/dpt-hybrid-midas
29+
30+
- **DW Pose TorchScript (BatchSize5)**
31+
Copyright (c) DWPose contributors.
32+
License: Apache License 2.0
33+
https://huggingface.co/hr16/DWPose-TorchScript-BatchSize5
34+
35+
- **YOLOX Person Detector (TorchScript)**
36+
Copyright (c) YOLOX contributors.
37+
License: Apache License 2.0
38+
https://huggingface.co/hr16/yolox-onnx
39+
2540
---
2641

2742
## Python Dependencies

backend/_routes/_admin_guard.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
"""Admin token guard for privileged settings mutations."""
2+
3+
from __future__ import annotations
4+
5+
import hmac
6+
7+
from fastapi import Request
8+
9+
from _routes._errors import HTTPError
10+
11+
12+
def guard_admin_permission(request: Request) -> None:
13+
"""Raise 403 if the request lacks a valid admin token."""
14+
admin_token: str = getattr(request.app.state, "admin_token", "")
15+
provided = request.headers.get("X-Admin-Token", "")
16+
if not admin_token or not provided or not hmac.compare_digest(provided, admin_token):
17+
raise HTTPError(403, "Admin token required")

backend/_routes/ic_lora.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,33 +5,17 @@
55
from fastapi import APIRouter, Depends
66

77
from api_types import (
8-
IcLoraDownloadRequest,
9-
IcLoraDownloadResponse,
108
IcLoraExtractRequest,
119
IcLoraExtractResponse,
1210
IcLoraGenerateRequest,
1311
IcLoraGenerateResponse,
14-
IcLoraListResponse,
1512
)
1613
from state import get_state_service
1714
from app_handler import AppHandler
1815

1916
router = APIRouter(prefix="/api/ic-lora", tags=["ic-lora"])
2017

2118

22-
@router.get("/list-models", response_model=IcLoraListResponse)
23-
def route_ic_lora_list_models(handler: AppHandler = Depends(get_state_service)) -> IcLoraListResponse:
24-
return handler.ic_lora.list_models()
25-
26-
27-
@router.post("/download-model", response_model=IcLoraDownloadResponse)
28-
def route_ic_lora_download(
29-
req: IcLoraDownloadRequest,
30-
handler: AppHandler = Depends(get_state_service),
31-
) -> IcLoraDownloadResponse:
32-
return handler.ic_lora.download_model(req)
33-
34-
3519
@router.post("/extract-conditioning", response_model=IcLoraExtractResponse)
3620
def route_ic_lora_extract(
3721
req: IcLoraExtractRequest,

backend/_routes/models.py

Lines changed: 26 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -4,14 +4,15 @@
44

55
import logging
66

7-
from fastapi import APIRouter, Depends
7+
from fastapi import APIRouter, Depends, Query
88

99
from api_types import (
1010
DownloadProgressResponse,
1111
ModelDownloadRequest,
1212
ModelDownloadStartResponse,
1313
ModelInfo,
1414
ModelsStatusResponse,
15+
RequiredModelsResponse,
1516
TextEncoderDownloadResponse,
1617
)
1718
from _routes._errors import HTTPError
@@ -34,8 +35,24 @@ def route_models_status(handler: AppHandler = Depends(get_state_service)) -> Mod
3435

3536

3637
@router.get("/models/download/progress", response_model=DownloadProgressResponse)
37-
def route_download_progress(handler: AppHandler = Depends(get_state_service)) -> DownloadProgressResponse:
38-
return handler.downloads.get_download_progress()
38+
def route_download_progress(
39+
sessionId: str = Query(...),
40+
handler: AppHandler = Depends(get_state_service),
41+
) -> DownloadProgressResponse:
42+
try:
43+
return handler.downloads.get_download_progress(sessionId)
44+
except ValueError as exc:
45+
raise HTTPError(404, str(exc))
46+
47+
48+
@router.get("/models/required-models", response_model=RequiredModelsResponse)
49+
def route_required_models(
50+
skipTextEncoder: bool = Query(default=False),
51+
handler: AppHandler = Depends(get_state_service),
52+
) -> RequiredModelsResponse:
53+
return RequiredModelsResponse(
54+
modelTypes=handler.models.get_required_model_types(skip_text_encoder=skipTextEncoder),
55+
)
3956

4057

4158
@router.post("/models/download", response_model=ModelDownloadStartResponse)
@@ -46,16 +63,12 @@ def route_model_download(
4663
if handler.downloads.is_download_running():
4764
raise HTTPError(409, "Download already in progress")
4865

49-
settings = handler.settings.get_settings_snapshot()
50-
skip_text_encoder = req.skipTextEncoder
51-
if settings.ltx_api_key and not settings.use_local_text_encoder:
52-
skip_text_encoder = True
53-
54-
if handler.downloads.start_model_download(skip_text_encoder=skip_text_encoder):
66+
session_id = handler.downloads.start_model_download(model_types=req.modelTypes)
67+
if session_id:
5568
return ModelDownloadStartResponse(
5669
status="started",
5770
message="Model download started",
58-
skippingTextEncoder=skip_text_encoder,
71+
sessionId=session_id,
5972
)
6073

6174
raise HTTPError(400, "Failed to start download")
@@ -70,7 +83,8 @@ def route_text_encoder_download(handler: AppHandler = Depends(get_state_service)
7083
if files["text_encoder"] is not None:
7184
return TextEncoderDownloadResponse(status="already_downloaded", message="Text encoder already downloaded")
7285

73-
if handler.downloads.start_text_encoder_download():
74-
return TextEncoderDownloadResponse(status="started", message="Text encoder download started")
86+
session_id = handler.downloads.start_text_encoder_download()
87+
if session_id:
88+
return TextEncoderDownloadResponse(status="started", message="Text encoder download started", sessionId=session_id)
7589

7690
raise HTTPError(400, "Failed to start download")

backend/_routes/settings.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,9 @@
44

55
import logging
66

7-
from fastapi import APIRouter, Depends
7+
from fastapi import APIRouter, Depends, Request
88

9+
from _routes._admin_guard import guard_admin_permission
910
from state.app_settings import SettingsResponse, UpdateSettingsRequest, to_settings_response
1011
from api_types import StatusResponse
1112
from state import get_state_service
@@ -24,8 +25,13 @@ def route_get_settings(handler: AppHandler = Depends(get_state_service)) -> Sett
2425
@router.post("/settings", response_model=StatusResponse)
2526
def route_post_settings(
2627
req: UpdateSettingsRequest,
28+
request: Request,
2729
handler: AppHandler = Depends(get_state_service),
2830
) -> StatusResponse:
31+
patch_data = req.model_dump(exclude_unset=True)
32+
if "models_dir" in patch_data or "modelsDir" in patch_data:
33+
guard_admin_permission(request)
34+
2935
_, _after, changed_paths = handler.settings.update_settings(req)
3036
changed_roots = {path.split(".", 1)[0] for path in changed_paths}
3137

backend/api_types.py

Lines changed: 34 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,17 @@
88
from pydantic import BaseModel, Field, StringConstraints
99

1010
NonEmptyPrompt = Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
11+
ModelFileType = Literal[
12+
"checkpoint",
13+
"upsampler",
14+
"distilled_lora",
15+
"ic_lora",
16+
"depth_processor",
17+
"person_detector",
18+
"pose_processor",
19+
"text_encoder",
20+
"zit",
21+
]
1122

1223

1324
class ImageConditioningInput(NamedTuple):
@@ -35,19 +46,6 @@ class GenerationState(TypedDict):
3546
total_steps: int
3647

3748

38-
class ModelDownloadState(TypedDict):
39-
status: str # "idle" | "downloading" | "complete" | "error"
40-
current_file: str
41-
current_file_progress: int
42-
total_progress: int
43-
downloaded_bytes: int
44-
total_bytes: int
45-
files_completed: int
46-
total_files: int
47-
error: str | None
48-
speed_mbps: int
49-
50-
5149
JsonObject: TypeAlias = dict[str, object]
5250
VideoCameraMotion = Literal[
5351
"none",
@@ -117,6 +115,7 @@ class ModelInfo(BaseModel):
117115

118116

119117
class ModelFileStatus(BaseModel):
118+
id: ModelFileType
120119
name: str
121120
description: str
122121
downloaded: bool
@@ -149,27 +148,15 @@ class ModelsStatusResponse(BaseModel):
149148

150149
class DownloadProgressResponse(BaseModel):
151150
status: str
152-
currentFile: str
153-
currentFileProgress: int
154-
totalProgress: int
155-
downloadedBytes: int
156-
totalBytes: int
157-
filesCompleted: int
158-
totalFiles: int
151+
current_downloading_file: ModelFileType | None
152+
current_file_progress: int
153+
total_progress: int
154+
total_downloaded_bytes: int
155+
expected_total_bytes: int
156+
completed_files: set[ModelFileType]
157+
all_files: set[ModelFileType]
159158
error: str | None
160-
speedMbps: int
161-
162-
163-
class IcLoraModel(BaseModel):
164-
name: str
165-
path: str
166-
conditioning_type: str
167-
reference_downscale_factor: int
168-
169-
170-
class IcLoraListResponse(BaseModel):
171-
models: list[IcLoraModel]
172-
directory: str
159+
speed_mbps: int
173160

174161

175162
class SuggestGapPromptResponse(BaseModel):
@@ -201,17 +188,10 @@ class RetakeResponse(BaseModel):
201188
class IcLoraExtractResponse(BaseModel):
202189
conditioning: str
203190
original: str
204-
conditioning_type: str
191+
conditioning_type: Literal["canny", "depth", "pose"]
205192
frame_time: float
206193

207194

208-
class IcLoraDownloadResponse(BaseModel):
209-
status: str
210-
path: str | None = None
211-
already_existed: bool | None = None
212-
already_exists: bool | None = None
213-
214-
215195
class IcLoraGenerateResponse(BaseModel):
216196
status: str
217197
video_path: str | None = None
@@ -220,12 +200,13 @@ class IcLoraGenerateResponse(BaseModel):
220200
class ModelDownloadStartResponse(BaseModel):
221201
status: str
222202
message: str | None = None
223-
skippingTextEncoder: bool | None = None
203+
sessionId: str | None = None
224204

225205

226206
class TextEncoderDownloadResponse(BaseModel):
227207
status: str
228208
message: str | None = None
209+
sessionId: str | None = None
229210

230211

231212
class StatusResponse(BaseModel):
@@ -264,8 +245,16 @@ class GenerateImageRequest(BaseModel):
264245
numImages: int = 1
265246

266247

248+
def _default_model_types() -> set[ModelFileType]:
249+
return set()
250+
251+
267252
class ModelDownloadRequest(BaseModel):
268-
skipTextEncoder: bool = False
253+
modelTypes: set[ModelFileType] = Field(default_factory=_default_model_types)
254+
255+
256+
class RequiredModelsResponse(BaseModel):
257+
modelTypes: list[ModelFileType]
269258

270259

271260
class SuggestGapPromptRequest(BaseModel):
@@ -286,13 +275,9 @@ class RetakeRequest(BaseModel):
286275
mode: str = "replace_audio_and_video"
287276

288277

289-
class IcLoraDownloadRequest(BaseModel):
290-
model: str
291-
292-
293278
class IcLoraExtractRequest(BaseModel):
294279
video_path: str
295-
conditioning_type: str = "canny"
280+
conditioning_type: Literal["canny", "depth", "pose"] = "canny"
296281
frame_time: float = 0
297282

298283

@@ -308,15 +293,9 @@ def _default_ic_lora_images() -> list[IcLoraImageInput]:
308293

309294
class IcLoraGenerateRequest(BaseModel):
310295
video_path: str
311-
lora_path: str
312-
conditioning_type: str = "canny"
296+
conditioning_type: Literal["canny", "depth", "pose"]
313297
prompt: NonEmptyPrompt
314298
conditioning_strength: float = 1.0
315-
seed: int = 42
316-
height: int = 512
317-
width: int = 768
318-
num_frames: int = 121
319-
frame_rate: float = 24
320299
num_inference_steps: int = 30
321300
cfg_guidance_scale: float = 1.0
322301
negative_prompt: str = ""

backend/app_factory.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,11 +41,13 @@ def create_app(
4141
allowed_origins: list[str] | None = None,
4242
title: str = "LTX-2 Video Generation Server",
4343
auth_token: str = "",
44+
admin_token: str = "",
4445
) -> FastAPI:
4546
"""Create a configured FastAPI app bound to the provided handler."""
4647
init_state_service(handler)
4748

4849
app = FastAPI(title=title)
50+
app.state.admin_token = admin_token # type: ignore[attr-defined]
4951
app.add_middleware(
5052
CORSMiddleware,
5153
allow_origins=allowed_origins or DEFAULT_ALLOWED_ORIGINS,
@@ -84,20 +86,22 @@ def _token_matches(candidate: str) -> bool:
8486
pass
8587
return JSONResponse(status_code=401, content={"error": "Unauthorized"})
8688

89+
_FALLBACK = "An unexpected error occurred"
90+
8791
async def _route_http_error_handler(request: Request, exc: Exception) -> JSONResponse:
8892
if isinstance(exc, HTTPError):
8993
log_http_error(request, exc)
90-
return JSONResponse(status_code=exc.status_code, content={"error": exc.detail})
91-
return JSONResponse(status_code=500, content={"error": str(exc)})
94+
return JSONResponse(status_code=exc.status_code, content={"error": exc.detail or _FALLBACK})
95+
return JSONResponse(status_code=500, content={"error": str(exc) or _FALLBACK})
9296

9397
async def _validation_error_handler(request: Request, exc: Exception) -> JSONResponse:
9498
if isinstance(exc, RequestValidationError):
95-
return JSONResponse(status_code=422, content={"error": str(exc)})
96-
return JSONResponse(status_code=422, content={"error": str(exc)})
99+
return JSONResponse(status_code=422, content={"error": str(exc) or _FALLBACK})
100+
return JSONResponse(status_code=422, content={"error": str(exc) or _FALLBACK})
97101

98102
async def _route_generic_error_handler(request: Request, exc: Exception) -> JSONResponse:
99103
log_unhandled_exception(request, exc)
100-
return JSONResponse(status_code=500, content={"error": str(exc)})
104+
return JSONResponse(status_code=500, content={"error": str(exc) or _FALLBACK})
101105

102106
app.add_exception_handler(RequestValidationError, _validation_error_handler)
103107
app.add_exception_handler(HTTPError, _route_http_error_handler)

0 commit comments

Comments
 (0)